repos/DirectXShaderCompiler/tools/clang/lib/Frontend/CodeGenOptions.cpp
//===--- CodeGenOptions.cpp -----------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/Frontend/CodeGenOptions.h"
#include <string.h>
namespace clang {
CodeGenOptions::CodeGenOptions() {
#define CODEGENOPT(Name, Bits, Default) Name = Default;
#define ENUM_CODEGENOPT(Name, Type, Bits, Default) set##Name(Default);
#include "clang/Frontend/CodeGenOptions.def"
RelocationModel = "pic";
memcpy(CoverageVersion, "402*", 4);
}
} // end namespace clang
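// Illustrative aside, not part of the file above: the CODEGENOPT lines are an
// X-macro. CodeGenOptions.def holds one CODEGENOPT(Name, Bits, Default) entry
// per option, and each #include of the .def re-expands those entries under
// whatever definition of CODEGENOPT is currently in scope. A self-contained
// miniature of the same technique (OptLevel/UnrollLoops are made-up names):
#include <iostream>

// Stand-in for a .def file: one OPTION(...) entry per option.
#define OPTIONS_LIST(OPTION) \
  OPTION(OptLevel, 2, 0)     \
  OPTION(UnrollLoops, 1, 0)

struct Options {
  // First expansion: declare one bitfield per option.
#define OPTION(Name, Bits, Default) unsigned Name : Bits;
  OPTIONS_LIST(OPTION)
#undef OPTION
  Options() {
    // Second expansion: initialize each option, as the constructor above does.
#define OPTION(Name, Bits, Default) Name = Default;
    OPTIONS_LIST(OPTION)
#undef OPTION
  }
};

int main() {
  Options O;
  std::cout << O.OptLevel << " " << O.UnrollLoops << "\n"; // prints "0 0"
}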
repos/DirectXShaderCompiler/tools/clang/lib/Frontend/MultiplexConsumer.cpp
//===- MultiplexConsumer.cpp - AST Consumer for PCH Generation --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the MultiplexConsumer class. It also declares and defines
// MultiplexASTDeserializationListener and MultiplexASTMutationListener, which
// are implementation details of MultiplexConsumer.
//
//===----------------------------------------------------------------------===//
#include "clang/Frontend/MultiplexConsumer.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/DeclGroup.h"
#include "clang/Serialization/ASTDeserializationListener.h"
using namespace clang;
namespace clang {
#if 0 // HLSL Change Starts - no support for serialization
// This ASTDeserializationListener forwards its notifications to a set of
// child listeners.
class MultiplexASTDeserializationListener
: public ASTDeserializationListener {
public:
// Does NOT take ownership of the elements in L.
MultiplexASTDeserializationListener(
const std::vector<ASTDeserializationListener*>& L);
void ReaderInitialized(ASTReader *Reader) override;
void IdentifierRead(serialization::IdentID ID,
IdentifierInfo *II) override;
void MacroRead(serialization::MacroID ID, MacroInfo *MI) override;
void TypeRead(serialization::TypeIdx Idx, QualType T) override;
void DeclRead(serialization::DeclID ID, const Decl *D) override;
void SelectorRead(serialization::SelectorID iD, Selector Sel) override;
void MacroDefinitionRead(serialization::PreprocessedEntityID,
MacroDefinitionRecord *MD) override;
void ModuleRead(serialization::SubmoduleID ID, Module *Mod) override;
private:
std::vector<ASTDeserializationListener *> Listeners;
};
MultiplexASTDeserializationListener::MultiplexASTDeserializationListener(
const std::vector<ASTDeserializationListener*>& L)
: Listeners(L) {
}
void MultiplexASTDeserializationListener::ReaderInitialized(
ASTReader *Reader) {
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
Listeners[i]->ReaderInitialized(Reader);
}
void MultiplexASTDeserializationListener::IdentifierRead(
serialization::IdentID ID, IdentifierInfo *II) {
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
Listeners[i]->IdentifierRead(ID, II);
}
void MultiplexASTDeserializationListener::MacroRead(
serialization::MacroID ID, MacroInfo *MI) {
for (auto &Listener : Listeners)
Listener->MacroRead(ID, MI);
}
void MultiplexASTDeserializationListener::TypeRead(
serialization::TypeIdx Idx, QualType T) {
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
Listeners[i]->TypeRead(Idx, T);
}
void MultiplexASTDeserializationListener::DeclRead(
serialization::DeclID ID, const Decl *D) {
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
Listeners[i]->DeclRead(ID, D);
}
void MultiplexASTDeserializationListener::SelectorRead(
serialization::SelectorID ID, Selector Sel) {
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
Listeners[i]->SelectorRead(ID, Sel);
}
void MultiplexASTDeserializationListener::MacroDefinitionRead(
serialization::PreprocessedEntityID ID, MacroDefinitionRecord *MD) {
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
Listeners[i]->MacroDefinitionRead(ID, MD);
}
void MultiplexASTDeserializationListener::ModuleRead(
serialization::SubmoduleID ID, Module *Mod) {
for (auto &Listener : Listeners)
Listener->ModuleRead(ID, Mod);
}
#endif // HLSL Change Ends - no support for serialization
// This ASTMutationListener forwards its notifications to a set of
// child listeners.
class MultiplexASTMutationListener : public ASTMutationListener {
public:
// Does NOT take ownership of the elements in L.
MultiplexASTMutationListener(ArrayRef<ASTMutationListener*> L);
void CompletedTagDefinition(const TagDecl *D) override;
void AddedVisibleDecl(const DeclContext *DC, const Decl *D) override;
void AddedCXXImplicitMember(const CXXRecordDecl *RD, const Decl *D) override;
void AddedCXXTemplateSpecialization(const ClassTemplateDecl *TD,
const ClassTemplateSpecializationDecl *D) override;
void AddedCXXTemplateSpecialization(const VarTemplateDecl *TD,
const VarTemplateSpecializationDecl *D) override;
void AddedCXXTemplateSpecialization(const FunctionTemplateDecl *TD,
const FunctionDecl *D) override;
void ResolvedExceptionSpec(const FunctionDecl *FD) override;
void DeducedReturnType(const FunctionDecl *FD, QualType ReturnType) override;
void ResolvedOperatorDelete(const CXXDestructorDecl *DD,
const FunctionDecl *Delete) override;
void CompletedImplicitDefinition(const FunctionDecl *D) override;
void StaticDataMemberInstantiated(const VarDecl *D) override;
void AddedObjCCategoryToInterface(const ObjCCategoryDecl *CatD,
const ObjCInterfaceDecl *IFD) override;
void FunctionDefinitionInstantiated(const FunctionDecl *D) override;
void AddedObjCPropertyInClassExtension(const ObjCPropertyDecl *Prop,
const ObjCPropertyDecl *OrigProp,
const ObjCCategoryDecl *ClassExt) override;
void DeclarationMarkedUsed(const Decl *D) override;
void DeclarationMarkedOpenMPThreadPrivate(const Decl *D) override;
void RedefinedHiddenDefinition(const NamedDecl *D, Module *M) override;
void AddedAttributeToRecord(const Attr *Attr,
const RecordDecl *Record) override;
private:
std::vector<ASTMutationListener*> Listeners;
};
MultiplexASTMutationListener::MultiplexASTMutationListener(
ArrayRef<ASTMutationListener*> L)
: Listeners(L.begin(), L.end()) {
}
void MultiplexASTMutationListener::CompletedTagDefinition(const TagDecl *D) {
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
Listeners[i]->CompletedTagDefinition(D);
}
void MultiplexASTMutationListener::AddedVisibleDecl(
const DeclContext *DC, const Decl *D) {
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
Listeners[i]->AddedVisibleDecl(DC, D);
}
void MultiplexASTMutationListener::AddedCXXImplicitMember(
const CXXRecordDecl *RD, const Decl *D) {
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
Listeners[i]->AddedCXXImplicitMember(RD, D);
}
void MultiplexASTMutationListener::AddedCXXTemplateSpecialization(
const ClassTemplateDecl *TD, const ClassTemplateSpecializationDecl *D) {
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
Listeners[i]->AddedCXXTemplateSpecialization(TD, D);
}
void MultiplexASTMutationListener::AddedCXXTemplateSpecialization(
const VarTemplateDecl *TD, const VarTemplateSpecializationDecl *D) {
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
Listeners[i]->AddedCXXTemplateSpecialization(TD, D);
}
void MultiplexASTMutationListener::AddedCXXTemplateSpecialization(
const FunctionTemplateDecl *TD, const FunctionDecl *D) {
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
Listeners[i]->AddedCXXTemplateSpecialization(TD, D);
}
void MultiplexASTMutationListener::ResolvedExceptionSpec(
const FunctionDecl *FD) {
for (auto &Listener : Listeners)
Listener->ResolvedExceptionSpec(FD);
}
void MultiplexASTMutationListener::DeducedReturnType(const FunctionDecl *FD,
QualType ReturnType) {
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
Listeners[i]->DeducedReturnType(FD, ReturnType);
}
void MultiplexASTMutationListener::ResolvedOperatorDelete(
const CXXDestructorDecl *DD, const FunctionDecl *Delete) {
for (auto *L : Listeners)
L->ResolvedOperatorDelete(DD, Delete);
}
void MultiplexASTMutationListener::CompletedImplicitDefinition(
const FunctionDecl *D) {
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
Listeners[i]->CompletedImplicitDefinition(D);
}
void MultiplexASTMutationListener::StaticDataMemberInstantiated(
const VarDecl *D) {
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
Listeners[i]->StaticDataMemberInstantiated(D);
}
void MultiplexASTMutationListener::AddedObjCCategoryToInterface(
const ObjCCategoryDecl *CatD,
const ObjCInterfaceDecl *IFD) {
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
Listeners[i]->AddedObjCCategoryToInterface(CatD, IFD);
}
void MultiplexASTMutationListener::FunctionDefinitionInstantiated(
const FunctionDecl *D) {
for (auto &Listener : Listeners)
Listener->FunctionDefinitionInstantiated(D);
}
void MultiplexASTMutationListener::AddedObjCPropertyInClassExtension(
const ObjCPropertyDecl *Prop,
const ObjCPropertyDecl *OrigProp,
const ObjCCategoryDecl *ClassExt) {
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
Listeners[i]->AddedObjCPropertyInClassExtension(Prop, OrigProp, ClassExt);
}
void MultiplexASTMutationListener::DeclarationMarkedUsed(const Decl *D) {
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
Listeners[i]->DeclarationMarkedUsed(D);
}
void MultiplexASTMutationListener::DeclarationMarkedOpenMPThreadPrivate(
const Decl *D) {
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
Listeners[i]->DeclarationMarkedOpenMPThreadPrivate(D);
}
void MultiplexASTMutationListener::RedefinedHiddenDefinition(const NamedDecl *D,
Module *M) {
for (auto *L : Listeners)
L->RedefinedHiddenDefinition(D, M);
}
void MultiplexASTMutationListener::AddedAttributeToRecord(
const Attr *Attr,
const RecordDecl *Record) {
for (auto *L : Listeners)
L->AddedAttributeToRecord(Attr, Record);
}
} // end namespace clang
#if 1 // HLSL Change Starts - no support for serialization
MultiplexConsumer::MultiplexConsumer(
std::vector<std::unique_ptr<ASTConsumer>> C)
: Consumers(std::move(C)), MutationListener() {
std::vector<ASTMutationListener*> mutationListeners;
for (auto &Consumer : Consumers) {
if (auto *mutationListener = Consumer->GetASTMutationListener())
mutationListeners.push_back(mutationListener);
}
if (!mutationListeners.empty()) {
MutationListener =
llvm::make_unique<MultiplexASTMutationListener>(mutationListeners);
}
}
#else
MultiplexConsumer::MultiplexConsumer(
std::vector<std::unique_ptr<ASTConsumer>> C)
: Consumers(std::move(C)), MutationListener(), DeserializationListener() {
// Collect the mutation listeners and deserialization listeners of all
// children, and create a multiplex listener each if so.
std::vector<ASTMutationListener*> mutationListeners;
std::vector<ASTDeserializationListener*> serializationListeners;
for (auto &Consumer : Consumers) {
if (auto *mutationListener = Consumer->GetASTMutationListener())
mutationListeners.push_back(mutationListener);
if (auto *serializationListener = Consumer->GetASTDeserializationListener())
serializationListeners.push_back(serializationListener);
}
if (!mutationListeners.empty()) {
MutationListener =
llvm::make_unique<MultiplexASTMutationListener>(mutationListeners);
}
if (!serializationListeners.empty()) {
DeserializationListener =
llvm::make_unique<MultiplexASTDeserializationListener>(
serializationListeners);
}
}
#endif // HLSL Change Ends - no support for serialization
MultiplexConsumer::~MultiplexConsumer() {}
void MultiplexConsumer::Initialize(ASTContext &Context) {
for (auto &Consumer : Consumers)
Consumer->Initialize(Context);
}
bool MultiplexConsumer::HandleTopLevelDecl(DeclGroupRef D) {
bool Continue = true;
for (auto &Consumer : Consumers)
Continue = Continue && Consumer->HandleTopLevelDecl(D);
return Continue;
}
void MultiplexConsumer::HandleInlineMethodDefinition(CXXMethodDecl *D) {
for (auto &Consumer : Consumers)
Consumer->HandleInlineMethodDefinition(D);
}
void MultiplexConsumer::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
for (auto &Consumer : Consumers)
Consumer->HandleCXXStaticMemberVarInstantiation(VD);
}
void MultiplexConsumer::HandleInterestingDecl(DeclGroupRef D) {
for (auto &Consumer : Consumers)
Consumer->HandleInterestingDecl(D);
}
void MultiplexConsumer::HandleTranslationUnit(ASTContext &Ctx) {
for (auto &Consumer : Consumers)
Consumer->HandleTranslationUnit(Ctx);
}
void MultiplexConsumer::HandleTagDeclDefinition(TagDecl *D) {
for (auto &Consumer : Consumers)
Consumer->HandleTagDeclDefinition(D);
}
void MultiplexConsumer::HandleTagDeclRequiredDefinition(const TagDecl *D) {
for (auto &Consumer : Consumers)
Consumer->HandleTagDeclRequiredDefinition(D);
}
void MultiplexConsumer::HandleCXXImplicitFunctionInstantiation(FunctionDecl *D){
for (auto &Consumer : Consumers)
Consumer->HandleCXXImplicitFunctionInstantiation(D);
}
void MultiplexConsumer::HandleTopLevelDeclInObjCContainer(DeclGroupRef D) {
for (auto &Consumer : Consumers)
Consumer->HandleTopLevelDeclInObjCContainer(D);
}
void MultiplexConsumer::HandleImplicitImportDecl(ImportDecl *D) {
for (auto &Consumer : Consumers)
Consumer->HandleImplicitImportDecl(D);
}
void MultiplexConsumer::HandleLinkerOptionPragma(llvm::StringRef Opts) {
for (auto &Consumer : Consumers)
Consumer->HandleLinkerOptionPragma(Opts);
}
void MultiplexConsumer::HandleDetectMismatch(llvm::StringRef Name, llvm::StringRef Value) {
for (auto &Consumer : Consumers)
Consumer->HandleDetectMismatch(Name, Value);
}
void MultiplexConsumer::HandleDependentLibrary(llvm::StringRef Lib) {
for (auto &Consumer : Consumers)
Consumer->HandleDependentLibrary(Lib);
}
void MultiplexConsumer::CompleteTentativeDefinition(VarDecl *D) {
for (auto &Consumer : Consumers)
Consumer->CompleteTentativeDefinition(D);
}
void MultiplexConsumer::HandleVTable(CXXRecordDecl *RD) {
for (auto &Consumer : Consumers)
Consumer->HandleVTable(RD);
}
ASTMutationListener *MultiplexConsumer::GetASTMutationListener() {
return MutationListener.get();
}
ASTDeserializationListener *MultiplexConsumer::GetASTDeserializationListener() {
return nullptr; // return DeserializationListener.get(); // HLSL Change - no support for serialization
}
void MultiplexConsumer::PrintStats() {
for (auto &Consumer : Consumers)
Consumer->PrintStats();
}
void MultiplexConsumer::InitializeSema(Sema &S) {
for (auto &Consumer : Consumers)
if (SemaConsumer *SC = dyn_cast<SemaConsumer>(Consumer.get()))
SC->InitializeSema(S);
}
void MultiplexConsumer::ForgetSema() {
for (auto &Consumer : Consumers)
if (SemaConsumer *SC = dyn_cast<SemaConsumer>(Consumer.get()))
SC->ForgetSema();
}
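// Illustrative usage sketch, not part of the file above; MyPrinter and
// MyIndexer are hypothetical ASTConsumer subclasses:
//
//   std::vector<std::unique_ptr<ASTConsumer>> C;
//   C.push_back(llvm::make_unique<MyPrinter>());
//   C.push_back(llvm::make_unique<MyIndexer>());
//   MultiplexConsumer Mux(std::move(C));
//   // Every callback now fans out to both children in order. Note that
//   // HandleTopLevelDecl short-circuits: once one child returns false,
//   // later children are not called for that decl group.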
repos/DirectXShaderCompiler/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
//===---- VerifyDiagnosticConsumer.cpp - Verifying Diagnostic Client ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is a concrete diagnostic client, which buffers the diagnostic messages.
//
//===----------------------------------------------------------------------===//
#include "clang/Frontend/VerifyDiagnosticConsumer.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/FileManager.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/TextDiagnosticBuffer.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Regex.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
typedef VerifyDiagnosticConsumer::Directive Directive;
typedef VerifyDiagnosticConsumer::DirectiveList DirectiveList;
typedef VerifyDiagnosticConsumer::ExpectedData ExpectedData;
VerifyDiagnosticConsumer::VerifyDiagnosticConsumer(DiagnosticsEngine &Diags_)
: Diags(Diags_),
PrimaryClient(Diags.getClient()), PrimaryClientOwner(Diags.takeClient()),
Buffer(new TextDiagnosticBuffer()), CurrentPreprocessor(nullptr),
LangOpts(nullptr), SrcManager(nullptr), ActiveSourceFiles(0),
Status(HasNoDirectives)
{
if (Diags.hasSourceManager())
setSourceManager(Diags.getSourceManager());
}
VerifyDiagnosticConsumer::~VerifyDiagnosticConsumer() {
assert(!ActiveSourceFiles && "Incomplete parsing of source files!");
assert(!CurrentPreprocessor && "CurrentPreprocessor should be invalid!");
SrcManager = nullptr;
CheckDiagnostics();
Diags.takeClient().release();
}
#ifndef NDEBUG
namespace {
class VerifyFileTracker : public PPCallbacks {
VerifyDiagnosticConsumer &Verify;
SourceManager &SM;
public:
VerifyFileTracker(VerifyDiagnosticConsumer &Verify, SourceManager &SM)
: Verify(Verify), SM(SM) { }
/// \brief Hook into the preprocessor and update the list of parsed
/// files when the preprocessor indicates a new file is entered.
void FileChanged(SourceLocation Loc, FileChangeReason Reason,
SrcMgr::CharacteristicKind FileType,
FileID PrevFID) override {
Verify.UpdateParsedFileStatus(SM, SM.getFileID(Loc),
VerifyDiagnosticConsumer::IsParsed);
}
};
} // End anonymous namespace.
#endif
// DiagnosticConsumer interface.
void VerifyDiagnosticConsumer::BeginSourceFile(const LangOptions &LangOpts,
const Preprocessor *PP) {
// Attach comment handler on first invocation.
if (++ActiveSourceFiles == 1) {
if (PP) {
CurrentPreprocessor = PP;
this->LangOpts = &LangOpts;
setSourceManager(PP->getSourceManager());
const_cast<Preprocessor*>(PP)->addCommentHandler(this);
#ifndef NDEBUG
// Debug build tracks parsed files.
const_cast<Preprocessor*>(PP)->addPPCallbacks(
llvm::make_unique<VerifyFileTracker>(*this, *SrcManager));
#endif
}
}
assert((!PP || CurrentPreprocessor == PP) && "Preprocessor changed!");
PrimaryClient->BeginSourceFile(LangOpts, PP);
}
void VerifyDiagnosticConsumer::EndSourceFile() {
assert(ActiveSourceFiles && "No active source files!");
PrimaryClient->EndSourceFile();
// Detach comment handler once last active source file completed.
if (--ActiveSourceFiles == 0) {
if (CurrentPreprocessor)
const_cast<Preprocessor*>(CurrentPreprocessor)->removeCommentHandler(this);
// Check diagnostics once last file completed.
CheckDiagnostics();
CurrentPreprocessor = nullptr;
LangOpts = nullptr;
}
}
void VerifyDiagnosticConsumer::HandleDiagnostic(
DiagnosticsEngine::Level DiagLevel, const Diagnostic &Info) {
if (Info.hasSourceManager()) {
// If this diagnostic is for a different source manager, ignore it.
if (SrcManager && &Info.getSourceManager() != SrcManager)
return;
setSourceManager(Info.getSourceManager());
}
#ifndef NDEBUG
// Debug build tracks unparsed files for possible
// unparsed expected-* directives.
if (SrcManager) {
SourceLocation Loc = Info.getLocation();
if (Loc.isValid()) {
ParsedStatus PS = IsUnparsed;
Loc = SrcManager->getExpansionLoc(Loc);
FileID FID = SrcManager->getFileID(Loc);
const FileEntry *FE = SrcManager->getFileEntryForID(FID);
if (FE && CurrentPreprocessor && SrcManager->isLoadedFileID(FID)) {
// If the file is a modules header file it shall not be parsed
// for expected-* directives.
HeaderSearch &HS = CurrentPreprocessor->getHeaderSearchInfo();
if (HS.findModuleForHeader(FE))
PS = IsUnparsedNoDirectives;
}
UpdateParsedFileStatus(*SrcManager, FID, PS);
}
}
#endif
// Send the diagnostic to the buffer, we will check it once we reach the end
// of the source file (or are destructed).
Buffer->HandleDiagnostic(DiagLevel, Info);
}
//===----------------------------------------------------------------------===//
// Checking diagnostics implementation.
//===----------------------------------------------------------------------===//
typedef TextDiagnosticBuffer::DiagList DiagList;
typedef TextDiagnosticBuffer::const_iterator const_diag_iterator;
namespace {
/// StandardDirective - Directive with string matching.
///
class StandardDirective : public Directive {
public:
StandardDirective(SourceLocation DirectiveLoc, SourceLocation DiagnosticLoc,
bool MatchAnyLine, StringRef Text, unsigned Min,
unsigned Max)
: Directive(DirectiveLoc, DiagnosticLoc, MatchAnyLine, Text, Min, Max) { }
bool isValid(std::string &Error) override {
// all strings are considered valid; even empty ones
return true;
}
bool match(StringRef S) override {
return S.find(Text) != StringRef::npos;
}
};
/// RegexDirective - Directive with regular-expression matching.
///
class RegexDirective : public Directive {
public:
RegexDirective(SourceLocation DirectiveLoc, SourceLocation DiagnosticLoc,
bool MatchAnyLine, StringRef Text, unsigned Min, unsigned Max,
StringRef RegexStr)
: Directive(DirectiveLoc, DiagnosticLoc, MatchAnyLine, Text, Min, Max),
Regex(RegexStr) { }
bool isValid(std::string &Error) override {
if (Regex.isValid(Error))
return true;
return false;
}
bool match(StringRef S) override {
return Regex.match(S);
}
private:
llvm::Regex Regex;
};
class ParseHelper
{
public:
ParseHelper(StringRef S)
: Begin(S.begin()), End(S.end()), C(Begin), P(Begin), PEnd(nullptr) {}
// Return true if string literal is next.
bool Next(StringRef S) {
P = C;
PEnd = C + S.size();
if (PEnd > End)
return false;
return !memcmp(P, S.data(), S.size());
}
// Return true if number is next.
// Output N only if number is next.
bool Next(unsigned &N) {
unsigned TMP = 0;
P = C;
for (; P < End && P[0] >= '0' && P[0] <= '9'; ++P) {
TMP *= 10;
TMP += P[0] - '0';
}
if (P == C)
return false;
PEnd = P;
N = TMP;
return true;
}
// Return true if string literal is found.
// When true, P marks begin-position of S in content.
bool Search(StringRef S, bool EnsureStartOfWord = false) {
do {
P = std::search(C, End, S.begin(), S.end());
PEnd = P + S.size();
if (P == End)
break;
if (!EnsureStartOfWord
// Check if string literal starts a new word.
|| P == Begin || isWhitespace(P[-1])
// Or it could be preceded by the start of a comment.
|| (P > (Begin + 1) && (P[-1] == '/' || P[-1] == '*')
&& P[-2] == '/'))
return true;
// Otherwise, skip and search again.
} while (Advance());
return false;
}
// Return true if a CloseBrace that closes the OpenBrace at the current nest
// level is found. When true, P marks begin-position of CloseBrace.
bool SearchClosingBrace(StringRef OpenBrace, StringRef CloseBrace) {
unsigned Depth = 1;
P = C;
while (P < End) {
StringRef S(P, End - P);
if (S.startswith(OpenBrace)) {
++Depth;
P += OpenBrace.size();
} else if (S.startswith(CloseBrace)) {
--Depth;
if (Depth == 0) {
PEnd = P + CloseBrace.size();
return true;
}
P += CloseBrace.size();
} else {
++P;
}
}
return false;
}
// Advance 1-past previous next/search.
// Behavior is undefined if previous next/search failed.
bool Advance() {
C = PEnd;
return C < End;
}
// Skip zero or more whitespace.
void SkipWhitespace() {
for (; C < End && isWhitespace(*C); ++C)
;
}
// Return true if EOF reached.
bool Done() {
return !(C < End);
}
const char * const Begin; // beginning of expected content
const char * const End; // end of expected content (1-past)
const char *C; // position of next char in content
const char *P;
private:
const char *PEnd; // previous next/search subject end (1-past)
};
} // end anonymous namespace
/// ParseDirective - Go through the comment and see if it indicates expected
/// diagnostics. If so, then put them in the appropriate directive list.
///
/// Returns true if any valid directives were found.
static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
Preprocessor *PP, SourceLocation Pos,
VerifyDiagnosticConsumer::DirectiveStatus &Status) {
DiagnosticsEngine &Diags = PP ? PP->getDiagnostics() : SM.getDiagnostics();
// A single comment may contain multiple directives.
bool FoundDirective = false;
for (ParseHelper PH(S); !PH.Done();) {
// Search for token: expected
if (!PH.Search("expected", true))
break;
PH.Advance();
// Next token: -
if (!PH.Next("-"))
continue;
PH.Advance();
// Next token: { error | warning | note }
DirectiveList *DL = nullptr;
if (PH.Next("error"))
DL = ED ? &ED->Errors : nullptr;
else if (PH.Next("warning"))
DL = ED ? &ED->Warnings : nullptr;
else if (PH.Next("remark"))
DL = ED ? &ED->Remarks : nullptr;
else if (PH.Next("note"))
DL = ED ? &ED->Notes : nullptr;
else if (PH.Next("no-diagnostics")) {
if (Status == VerifyDiagnosticConsumer::HasOtherExpectedDirectives)
Diags.Report(Pos, diag::err_verify_invalid_no_diags)
<< /*IsExpectedNoDiagnostics=*/true;
else
Status = VerifyDiagnosticConsumer::HasExpectedNoDiagnostics;
continue;
} else
continue;
PH.Advance();
if (Status == VerifyDiagnosticConsumer::HasExpectedNoDiagnostics) {
Diags.Report(Pos, diag::err_verify_invalid_no_diags)
<< /*IsExpectedNoDiagnostics=*/false;
continue;
}
Status = VerifyDiagnosticConsumer::HasOtherExpectedDirectives;
// If a directive has been found but we're not interested
// in storing the directive information, return now.
if (!DL)
return true;
// Default directive kind.
bool RegexKind = false;
const char* KindStr = "string";
// Next optional token: -
if (PH.Next("-re")) {
PH.Advance();
RegexKind = true;
KindStr = "regex";
}
// Next optional token: @
SourceLocation ExpectedLoc;
bool MatchAnyLine = false;
if (!PH.Next("@")) {
ExpectedLoc = Pos;
} else {
PH.Advance();
unsigned Line = 0;
bool ExpectInvalid = false; // HLSL Change
bool FoundPlus = PH.Next("+");
if (FoundPlus || PH.Next("-")) {
// Relative to current line.
PH.Advance();
bool Invalid = false;
unsigned ExpectedLine = SM.getSpellingLineNumber(Pos, &Invalid);
if (!Invalid && PH.Next(Line) && (FoundPlus || Line < ExpectedLine)) {
if (FoundPlus) ExpectedLine += Line;
else ExpectedLine -= Line;
ExpectedLoc = SM.translateLineCol(SM.getFileID(Pos), ExpectedLine, 1);
}
// HLSL Change Starts
} else if (PH.Next("?")) {
ExpectInvalid = true;
// HLSL Change Ends
} else if (PH.Next(Line)) {
// Absolute line number.
if (Line > 0)
ExpectedLoc = SM.translateLineCol(SM.getFileID(Pos), Line, 1);
} else if (PP && PH.Search(":")) {
// Specific source file.
StringRef Filename(PH.C, PH.P-PH.C);
PH.Advance();
// Lookup file via Preprocessor, like a #include.
const DirectoryLookup *CurDir;
const FileEntry *FE =
PP->LookupFile(Pos, Filename, false, nullptr, nullptr, CurDir,
nullptr, nullptr, nullptr);
if (!FE) {
Diags.Report(Pos.getLocWithOffset(PH.C-PH.Begin),
diag::err_verify_missing_file) << Filename << KindStr;
continue;
}
if (SM.translateFile(FE).isInvalid())
SM.createFileID(FE, Pos, SrcMgr::C_User);
if (PH.Next(Line) && Line > 0)
ExpectedLoc = SM.translateFileLineCol(FE, Line, 1);
else if (PH.Next("*")) {
MatchAnyLine = true;
ExpectedLoc = SM.translateFileLineCol(FE, 1, 1);
}
}
// HLSL Change: guard against explicit invalid locations
if (!ExpectInvalid && ExpectedLoc.isInvalid()) {
Diags.Report(Pos.getLocWithOffset(PH.C-PH.Begin),
diag::err_verify_missing_line) << KindStr;
continue;
}
PH.Advance();
}
// Skip optional whitespace.
PH.SkipWhitespace();
// Next optional token: positive integer or a '+'.
unsigned Min = 1;
unsigned Max = 1;
if (PH.Next(Min)) {
PH.Advance();
// A positive integer can be followed by a '+' meaning min
// or more, or by a '-' meaning a range from min to max.
if (PH.Next("+")) {
Max = Directive::MaxCount;
PH.Advance();
} else if (PH.Next("-")) {
PH.Advance();
if (!PH.Next(Max) || Max < Min) {
Diags.Report(Pos.getLocWithOffset(PH.C-PH.Begin),
diag::err_verify_invalid_range) << KindStr;
continue;
}
PH.Advance();
} else {
Max = Min;
}
} else if (PH.Next("+")) {
// '+' on its own means "1 or more".
Max = Directive::MaxCount;
PH.Advance();
}
// Skip optional whitespace.
PH.SkipWhitespace();
// Next token: {{
if (!PH.Next("{{")) {
Diags.Report(Pos.getLocWithOffset(PH.C-PH.Begin),
diag::err_verify_missing_start) << KindStr;
continue;
}
PH.Advance();
const char* const ContentBegin = PH.C; // mark content begin
// Search for token: }}
if (!PH.SearchClosingBrace("{{", "}}")) {
Diags.Report(Pos.getLocWithOffset(PH.C-PH.Begin),
diag::err_verify_missing_end) << KindStr;
continue;
}
const char* const ContentEnd = PH.P; // mark content end
PH.Advance();
// Build directive text; convert \n to newlines.
std::string Text;
StringRef NewlineStr = "\\n";
StringRef Content(ContentBegin, ContentEnd-ContentBegin);
size_t CPos = 0;
size_t FPos;
while ((FPos = Content.find(NewlineStr, CPos)) != StringRef::npos) {
Text += Content.substr(CPos, FPos-CPos);
Text += '\n';
CPos = FPos + NewlineStr.size();
}
if (Text.empty())
Text.assign(ContentBegin, ContentEnd);
// Check that regex directives contain at least one regex.
if (RegexKind && Text.find("{{") == StringRef::npos) {
Diags.Report(Pos.getLocWithOffset(ContentBegin-PH.Begin),
diag::err_verify_missing_regex) << Text;
return false;
}
// Construct new directive.
std::unique_ptr<Directive> D = Directive::create(
RegexKind, Pos, ExpectedLoc, MatchAnyLine, Text, Min, Max);
std::string Error;
if (D->isValid(Error)) {
DL->push_back(std::move(D));
FoundDirective = true;
} else {
Diags.Report(Pos.getLocWithOffset(ContentBegin-PH.Begin),
diag::err_verify_invalid_content)
<< KindStr << Error;
}
}
return FoundDirective;
}
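// Illustrative directive forms, not from the original file, showing the
// grammar ParseDirective accepts inside comments of a test input (file names
// here are hypothetical):
//
//   // expected-error {{undeclared identifier}}       diagnostic on this line
//   // expected-warning@+2 {{unused variable}}        two lines below
//   // expected-note@decls.h:10 {{declared here}}     specific file and line
//   // expected-error@? {{...}}                       HLSL: expect an invalid location
//   // expected-error 2 {{...}}                       exactly two occurrences
//   // expected-error 1-3 {{...}}                     between one and three
//   // expected-error 2+ {{...}}                      two or more
//   // expected-error-re {{literal {{[0-9]+}} text}}  regex segments in {{ }}
//   // expected-no-diagnostics                        no diagnostics at all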
/// HandleComment - Hook into the preprocessor and extract comments containing
/// expected errors and warnings.
bool VerifyDiagnosticConsumer::HandleComment(Preprocessor &PP,
SourceRange Comment) {
SourceManager &SM = PP.getSourceManager();
// If this comment is for a different source manager, ignore it.
if (SrcManager && &SM != SrcManager)
return false;
SourceLocation CommentBegin = Comment.getBegin();
const char *CommentRaw = SM.getCharacterData(CommentBegin);
StringRef C(CommentRaw, SM.getCharacterData(Comment.getEnd()) - CommentRaw);
if (C.empty())
return false;
// Fold any "\<EOL>" sequences
size_t loc = C.find('\\');
if (loc == StringRef::npos) {
ParseDirective(C, &ED, SM, &PP, CommentBegin, Status);
return false;
}
std::string C2;
C2.reserve(C.size());
for (size_t last = 0;; loc = C.find('\\', last)) {
if (loc == StringRef::npos || loc == C.size()) {
C2 += C.substr(last);
break;
}
C2 += C.substr(last, loc-last);
last = loc + 1;
if (C[last] == '\n' || C[last] == '\r') {
++last;
// Escape \r\n or \n\r, but not \n\n.
if (last < C.size())
if (C[last] == '\n' || C[last] == '\r')
if (C[last] != C[last-1])
++last;
} else {
// This was just a normal backslash.
C2 += '\\';
}
}
if (!C2.empty())
ParseDirective(C2, &ED, SM, &PP, CommentBegin, Status);
return false;
}
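// Example of the folding above (sketch): a comment containing
// "expected-\<newline>error" reaches ParseDirective as "expected-error";
// "\r\n" and "\n\r" pairs after the backslash are consumed as one line end,
// while a backslash not followed by a line end is kept verbatim.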
#ifndef NDEBUG
/// \brief Lex the specified source file to determine whether it contains
/// any expected-* directives. As a Lexer is used rather than a full-blown
/// Preprocessor, directives inside skipped #if blocks will still be found.
///
/// \return true if any directives were found.
static bool findDirectives(SourceManager &SM, FileID FID,
const LangOptions &LangOpts) {
// Create a raw lexer to pull all the comments out of FID.
if (FID.isInvalid())
return false;
// Create a lexer to lex all the tokens of the main file in raw mode.
const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
Lexer RawLex(FID, FromFile, SM, LangOpts);
// Return comments as tokens, this is how we find expected diagnostics.
RawLex.SetCommentRetentionState(true);
Token Tok;
Tok.setKind(tok::comment);
VerifyDiagnosticConsumer::DirectiveStatus Status =
VerifyDiagnosticConsumer::HasNoDirectives;
while (Tok.isNot(tok::eof)) {
RawLex.LexFromRawLexer(Tok);
if (!Tok.is(tok::comment)) continue;
std::string Comment = RawLex.getSpelling(Tok, SM, LangOpts);
if (Comment.empty()) continue;
// Find first directive.
if (ParseDirective(Comment, nullptr, SM, nullptr, Tok.getLocation(),
Status))
return true;
}
return false;
}
#endif // !NDEBUG
/// \brief Takes a list of diagnostics that have been generated but not matched
/// by an expected-* directive and produces a diagnostic to the user from this.
static unsigned PrintUnexpected(DiagnosticsEngine &Diags, SourceManager *SourceMgr,
const_diag_iterator diag_begin,
const_diag_iterator diag_end,
const char *Kind) {
if (diag_begin == diag_end) return 0;
SmallString<256> Fmt;
llvm::raw_svector_ostream OS(Fmt);
for (const_diag_iterator I = diag_begin, E = diag_end; I != E; ++I) {
if (I->first.isInvalid() || !SourceMgr)
OS << "\n (frontend)";
else {
OS << "\n ";
if (const FileEntry *File = SourceMgr->getFileEntryForID(
SourceMgr->getFileID(I->first)))
OS << " File " << File->getName();
OS << " Line " << SourceMgr->getPresumedLineNumber(I->first);
}
OS << ": " << I->second;
}
Diags.Report(diag::err_verify_inconsistent_diags).setForceEmit()
<< Kind << /*Unexpected=*/true << OS.str();
return std::distance(diag_begin, diag_end);
}
/// \brief Takes a list of diagnostics that were expected to have been generated
/// but were not and produces a diagnostic to the user from this.
static unsigned PrintExpected(DiagnosticsEngine &Diags,
SourceManager &SourceMgr,
std::vector<Directive *> &DL, const char *Kind) {
if (DL.empty())
return 0;
SmallString<256> Fmt;
llvm::raw_svector_ostream OS(Fmt);
for (auto *DirPtr : DL) {
Directive &D = *DirPtr;
OS << "\n File " << SourceMgr.getFilename(D.DiagnosticLoc);
if (D.MatchAnyLine)
OS << " Line *";
else
OS << " Line " << SourceMgr.getPresumedLineNumber(D.DiagnosticLoc);
if (D.DirectiveLoc != D.DiagnosticLoc)
OS << " (directive at "
<< SourceMgr.getFilename(D.DirectiveLoc) << ':'
<< SourceMgr.getPresumedLineNumber(D.DirectiveLoc) << ')';
OS << ": " << D.Text;
}
Diags.Report(diag::err_verify_inconsistent_diags).setForceEmit()
<< Kind << /*Unexpected=*/false << OS.str();
return DL.size();
}
/// \brief Determine whether two source locations come from the same file.
static bool IsFromSameFile(SourceManager &SM, SourceLocation DirectiveLoc,
SourceLocation DiagnosticLoc) {
while (DiagnosticLoc.isMacroID())
DiagnosticLoc = SM.getImmediateMacroCallerLoc(DiagnosticLoc);
if (SM.isWrittenInSameFile(DirectiveLoc, DiagnosticLoc))
return true;
const FileEntry *DiagFile = SM.getFileEntryForID(SM.getFileID(DiagnosticLoc));
if (!DiagFile && SM.isWrittenInMainFile(DirectiveLoc))
return true;
return (DiagFile == SM.getFileEntryForID(SM.getFileID(DirectiveLoc)));
}
/// CheckLists - Compare expected to seen diagnostic lists and return the
/// difference between them.
///
static unsigned CheckLists(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
const char *Label,
DirectiveList &Left,
const_diag_iterator d2_begin,
const_diag_iterator d2_end,
bool IgnoreUnexpected) {
std::vector<Directive *> LeftOnly;
DiagList Right(d2_begin, d2_end);
for (auto &Owner : Left) {
Directive &D = *Owner;
unsigned LineNo1 = SourceMgr.getPresumedLineNumber(D.DiagnosticLoc);
for (unsigned i = 0; i < D.Max; ++i) {
DiagList::iterator II, IE;
for (II = Right.begin(), IE = Right.end(); II != IE; ++II) {
if (!D.MatchAnyLine) {
unsigned LineNo2 = SourceMgr.getPresumedLineNumber(II->first);
if (LineNo1 != LineNo2)
continue;
}
if (!IsFromSameFile(SourceMgr, D.DiagnosticLoc, II->first))
continue;
const std::string &RightText = II->second;
if (D.match(RightText))
break;
}
if (II == IE) {
// Not found.
if (i >= D.Min) break;
LeftOnly.push_back(&D);
} else {
// Found. The same cannot be found twice.
Right.erase(II);
}
}
}
// Now all that's left in Right are those that were not matched.
unsigned num = PrintExpected(Diags, SourceMgr, LeftOnly, Label);
if (!IgnoreUnexpected)
num += PrintUnexpected(Diags, &SourceMgr, Right.begin(), Right.end(), Label);
return num;
}
/// CheckResults - This compares the expected results to those that
/// were actually reported. It emits any discrepancies and returns the
/// number of problems found.
///
static unsigned CheckResults(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
const TextDiagnosticBuffer &Buffer,
ExpectedData &ED) {
// We want to capture the delta between what was expected and what was
// seen.
//
// Expected \ Seen - set expected but not seen
// Seen \ Expected - set seen but not expected
unsigned NumProblems = 0;
const DiagnosticLevelMask DiagMask =
Diags.getDiagnosticOptions().getVerifyIgnoreUnexpected();
// See if there are error mismatches.
NumProblems += CheckLists(Diags, SourceMgr, "error", ED.Errors,
Buffer.err_begin(), Buffer.err_end(),
bool(DiagnosticLevelMask::Error & DiagMask));
// See if there are warning mismatches.
NumProblems += CheckLists(Diags, SourceMgr, "warning", ED.Warnings,
Buffer.warn_begin(), Buffer.warn_end(),
bool(DiagnosticLevelMask::Warning & DiagMask));
// See if there are remark mismatches.
NumProblems += CheckLists(Diags, SourceMgr, "remark", ED.Remarks,
Buffer.remark_begin(), Buffer.remark_end(),
bool(DiagnosticLevelMask::Remark & DiagMask));
// See if there are note mismatches.
NumProblems += CheckLists(Diags, SourceMgr, "note", ED.Notes,
Buffer.note_begin(), Buffer.note_end(),
bool(DiagnosticLevelMask::Note & DiagMask));
return NumProblems;
}
void VerifyDiagnosticConsumer::UpdateParsedFileStatus(SourceManager &SM,
FileID FID,
ParsedStatus PS) {
// Check SourceManager hasn't changed.
setSourceManager(SM);
#ifndef NDEBUG
if (FID.isInvalid())
return;
const FileEntry *FE = SM.getFileEntryForID(FID);
if (PS == IsParsed) {
// Move the FileID from the unparsed set to the parsed set.
UnparsedFiles.erase(FID);
ParsedFiles.insert(std::make_pair(FID, FE));
} else if (!ParsedFiles.count(FID) && !UnparsedFiles.count(FID)) {
// Add the FileID to the unparsed set if we haven't seen it before.
// Check for directives.
bool FoundDirectives;
if (PS == IsUnparsedNoDirectives)
FoundDirectives = false;
else
FoundDirectives = !LangOpts || findDirectives(SM, FID, *LangOpts);
// Add the FileID to the unparsed set.
UnparsedFiles.insert(std::make_pair(FID,
UnparsedFileStatus(FE, FoundDirectives)));
}
#endif
}
void VerifyDiagnosticConsumer::CheckDiagnostics() {
// Ensure any diagnostics go to the primary client.
DiagnosticConsumer *CurClient = Diags.getClient();
std::unique_ptr<DiagnosticConsumer> Owner = Diags.takeClient();
Diags.setClient(PrimaryClient, false);
#ifndef NDEBUG
// In a debug build, scan through any files that may have been missed
// during parsing and issue a fatal error if directives are contained
// within these files. If a fatal error occurs, this suggests that
// this file is being parsed separately from the main file, in which
// case consider moving the directives to the correct place, if this
// is applicable.
if (UnparsedFiles.size() > 0) {
// Generate a cache of parsed FileEntry pointers for alias lookups.
llvm::SmallPtrSet<const FileEntry *, 8> ParsedFileCache;
for (ParsedFilesMap::iterator I = ParsedFiles.begin(),
End = ParsedFiles.end(); I != End; ++I) {
if (const FileEntry *FE = I->second)
ParsedFileCache.insert(FE);
}
// Iterate through list of unparsed files.
for (UnparsedFilesMap::iterator I = UnparsedFiles.begin(),
End = UnparsedFiles.end(); I != End; ++I) {
const UnparsedFileStatus &Status = I->second;
const FileEntry *FE = Status.getFile();
// Skip files that have been parsed via an alias.
if (FE && ParsedFileCache.count(FE))
continue;
// Report a fatal error if this file contained directives.
if (Status.foundDirectives()) {
llvm::report_fatal_error(Twine("-verify directives found after rather"
" than during normal parsing of ",
StringRef(FE ? FE->getName() : "(unknown)")));
}
}
// UnparsedFiles has been processed now, so clear it.
UnparsedFiles.clear();
}
#endif // !NDEBUG
if (SrcManager) {
// Produce an error if no expected-* directives could be found in the
// source file(s) processed.
if (Status == HasNoDirectives) {
Diags.Report(diag::err_verify_no_directives).setForceEmit();
++NumErrors;
Status = HasNoDirectivesReported;
}
// Check that the expected diagnostics occurred.
NumErrors += CheckResults(Diags, *SrcManager, *Buffer, ED);
} else {
const DiagnosticLevelMask DiagMask =
~Diags.getDiagnosticOptions().getVerifyIgnoreUnexpected();
if (bool(DiagnosticLevelMask::Error & DiagMask))
NumErrors += PrintUnexpected(Diags, nullptr, Buffer->err_begin(),
Buffer->err_end(), "error");
if (bool(DiagnosticLevelMask::Warning & DiagMask))
NumErrors += PrintUnexpected(Diags, nullptr, Buffer->warn_begin(),
Buffer->warn_end(), "warn");
if (bool(DiagnosticLevelMask::Remark & DiagMask))
NumErrors += PrintUnexpected(Diags, nullptr, Buffer->remark_begin(),
Buffer->remark_end(), "remark");
if (bool(DiagnosticLevelMask::Note & DiagMask))
NumErrors += PrintUnexpected(Diags, nullptr, Buffer->note_begin(),
Buffer->note_end(), "note");
}
Diags.setClient(CurClient, Owner.release() != nullptr);
// Reset the buffer, we have processed all the diagnostics in it.
Buffer.reset(new TextDiagnosticBuffer());
ED.Reset();
}
std::unique_ptr<Directive> Directive::create(bool RegexKind,
SourceLocation DirectiveLoc,
SourceLocation DiagnosticLoc,
bool MatchAnyLine, StringRef Text,
unsigned Min, unsigned Max) {
if (!RegexKind)
return llvm::make_unique<StandardDirective>(DirectiveLoc, DiagnosticLoc,
MatchAnyLine, Text, Min, Max);
// Parse the directive into a regular expression.
std::string RegexStr;
StringRef S = Text;
while (!S.empty()) {
if (S.startswith("{{")) {
S = S.drop_front(2);
size_t RegexMatchLength = S.find("}}");
assert(RegexMatchLength != StringRef::npos);
// Append the regex, enclosed in parentheses.
RegexStr += "(";
RegexStr.append(S.data(), RegexMatchLength);
RegexStr += ")";
S = S.drop_front(RegexMatchLength + 2);
} else {
size_t VerbatimMatchLength = S.find("{{");
if (VerbatimMatchLength == StringRef::npos)
VerbatimMatchLength = S.size();
// Escape and append the fixed string.
RegexStr += llvm::Regex::escape(S.substr(0, VerbatimMatchLength));
S = S.drop_front(VerbatimMatchLength);
}
}
return llvm::make_unique<RegexDirective>(
DirectiveLoc, DiagnosticLoc, MatchAnyLine, Text, Min, Max, RegexStr);
}
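// Worked example, not from the original file: for an expected-*-re directive
// whose text is
//   candidate {{[0-9]+}} found
// the loop above builds the pattern
//   candidate ([0-9]+) found
// verbatim segments pass through llvm::Regex::escape (backslash-escaping any
// regex metacharacters), and each {{...}} segment is copied unescaped inside
// a capture group.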
repos/DirectXShaderCompiler/tools/clang/lib/Frontend/LayoutOverrideSource.cpp
//===--- LayoutOverrideSource.cpp --Override Record Layouts ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/Frontend/LayoutOverrideSource.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/CharInfo.h"
#include "llvm/Support/raw_ostream.h"
#include <fstream>
#include <string>
using namespace clang;
/// \brief Parse a simple identifier.
static std::string parseName(StringRef S) {
if (S.empty() || !isIdentifierHead(S[0]))
return "";
unsigned Offset = 1;
while (Offset < S.size() && isIdentifierBody(S[Offset]))
++Offset;
return S.substr(0, Offset).str();
}
LayoutOverrideSource::LayoutOverrideSource(StringRef Filename) {
std::ifstream Input(Filename.str().c_str());
if (!Input.is_open())
return;
// Parse the output of -fdump-record-layouts.
std::string CurrentType;
Layout CurrentLayout;
bool ExpectingType = false;
while (Input.good()) {
std::string Line;
getline(Input, Line);
StringRef LineStr(Line);
// Determine whether the following lines will start a new record layout dump.
if (LineStr.find("*** Dumping AST Record Layout") != StringRef::npos) {
// Flush the last type/layout, if there is one.
if (!CurrentType.empty())
Layouts[CurrentType] = CurrentLayout;
CurrentLayout = Layout();
ExpectingType = true;
continue;
}
// If we're expecting a type, grab it.
if (ExpectingType) {
ExpectingType = false;
StringRef::size_type Pos;
if ((Pos = LineStr.find("struct ")) != StringRef::npos)
LineStr = LineStr.substr(Pos + strlen("struct "));
else if ((Pos = LineStr.find("class ")) != StringRef::npos)
LineStr = LineStr.substr(Pos + strlen("class "));
else if ((Pos = LineStr.find("union ")) != StringRef::npos)
LineStr = LineStr.substr(Pos + strlen("union "));
else
continue;
// Find the name of the type.
CurrentType = parseName(LineStr);
CurrentLayout = Layout();
continue;
}
// Check for the size of the type.
StringRef::size_type Pos = LineStr.find(" Size:");
if (Pos != StringRef::npos) {
// Skip past the " Size:" prefix.
LineStr = LineStr.substr(Pos + strlen(" Size:"));
unsigned long long Size = 0;
(void)LineStr.getAsInteger(10, Size);
CurrentLayout.Size = Size;
continue;
}
// Check for the alignment of the type.
Pos = LineStr.find("Alignment:");
if (Pos != StringRef::npos) {
// Skip past the "Alignment:" prefix.
LineStr = LineStr.substr(Pos + strlen("Alignment:"));
unsigned long long Alignment = 0;
(void)LineStr.getAsInteger(10, Alignment);
CurrentLayout.Align = Alignment;
continue;
}
// Check for the size/alignment of the type.
Pos = LineStr.find("sizeof=");
if (Pos != StringRef::npos) {
// Skip past the "sizeof=" prefix.
LineStr = LineStr.substr(Pos + strlen("sizeof="));
// Parse size.
unsigned long long Size = 0;
(void)LineStr.getAsInteger(10, Size);
CurrentLayout.Size = Size;
Pos = LineStr.find("align=");
if (Pos != StringRef::npos) {
// Skip past the "align=" prefix.
LineStr = LineStr.substr(Pos + strlen("align="));
// Parse alignment.
unsigned long long Alignment = 0;
(void)LineStr.getAsInteger(10, Alignment);
CurrentLayout.Align = Alignment;
}
continue;
}
// Check for the field offsets of the type.
Pos = LineStr.find("FieldOffsets: [");
if (Pos == StringRef::npos)
continue;
LineStr = LineStr.substr(Pos + strlen("FieldOffsets: ["));
while (!LineStr.empty() && isDigit(LineStr[0])) {
// Parse this offset.
unsigned Idx = 1;
while (Idx < LineStr.size() && isDigit(LineStr[Idx]))
++Idx;
unsigned long long Offset = 0;
(void)LineStr.substr(0, Idx).getAsInteger(10, Offset);
CurrentLayout.FieldOffsets.push_back(Offset);
// Skip over this offset, the following comma, and any spaces.
LineStr = LineStr.substr(Idx + 1);
while (!LineStr.empty() && isWhitespace(LineStr[0]))
LineStr = LineStr.substr(1);
}
}
// Flush the last type/layout, if there is one.
if (!CurrentType.empty())
Layouts[CurrentType] = CurrentLayout;
}
bool
LayoutOverrideSource::layoutRecordType(const RecordDecl *Record,
uint64_t &Size, uint64_t &Alignment,
llvm::DenseMap<const FieldDecl *, uint64_t> &FieldOffsets,
llvm::DenseMap<const CXXRecordDecl *, CharUnits> &BaseOffsets,
llvm::DenseMap<const CXXRecordDecl *, CharUnits> &VirtualBaseOffsets)
{
// We can't override unnamed declarations.
if (!Record->getIdentifier())
return false;
// Check whether we have a layout for this record.
llvm::StringMap<Layout>::iterator Known = Layouts.find(Record->getName());
if (Known == Layouts.end())
return false;
// Provide field layouts.
unsigned NumFields = 0;
for (RecordDecl::field_iterator F = Record->field_begin(),
FEnd = Record->field_end();
F != FEnd; ++F, ++NumFields) {
if (NumFields >= Known->second.FieldOffsets.size())
continue;
FieldOffsets[*F] = Known->second.FieldOffsets[NumFields];
}
// Wrong number of fields.
if (NumFields != Known->second.FieldOffsets.size())
return false;
Size = Known->second.Size;
Alignment = Known->second.Align;
return true;
}
void LayoutOverrideSource::dump() {
raw_ostream &OS = llvm::errs();
for (llvm::StringMap<Layout>::iterator L = Layouts.begin(),
LEnd = Layouts.end();
L != LEnd; ++L) {
OS << "Type: blah " << L->first() << '\n';
OS << " Size:" << L->second.Size << '\n';
OS << " Alignment:" << L->second.Align << '\n';
OS << " FieldOffsets: [";
for (unsigned I = 0, N = L->second.FieldOffsets.size(); I != N; ++I) {
if (I)
OS << ", ";
OS << L->second.FieldOffsets[I];
}
OS << "]\n";
}
}
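// Illustrative input, a sketch of the -fdump-record-layouts text this parser
// consumes (the type and numbers are made up):
//
//   *** Dumping AST Record Layout
//      0 | struct Widget
//        Size:64
//        Alignment:32
//        FieldOffsets: [0, 32]
//
// The constructor keys on the "*** Dumping AST Record Layout" marker, pulls
// the type name from the next struct/class/union line, then scans for
// "Size:", "Alignment:" (or the "sizeof=.../align=..." form), and
// "FieldOffsets: [...]".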
repos/DirectXShaderCompiler/tools/clang/lib/Frontend/InitPreprocessor.cpp
//===--- InitPreprocessor.cpp - PP initialization code. ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the clang::InitializePreprocessor function.
//
//===----------------------------------------------------------------------===//
#include "clang/Frontend/Utils.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/MacroBuilder.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/Version.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/FrontendOptions.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Serialization/ASTReader.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "dxcversion.inc" // HLSL Change
#include "dxc/DXIL/DxilConstants.h" // HLSL Change
#include "dxc/DXIL/DxilShaderModel.h" // HLSL Change
using namespace clang;
static bool MacroBodyEndsInBackslash(StringRef MacroBody) {
while (!MacroBody.empty() && isWhitespace(MacroBody.back()))
MacroBody = MacroBody.drop_back();
return !MacroBody.empty() && MacroBody.back() == '\\';
}
// Append a #define line to Buf for Macro. Macro should be of the form XXX,
// in which case we emit "#define XXX 1" or "XXX=Y z W" in which case we emit
// "#define XXX Y z W". To get a #define with no value, use "XXX=".
static void DefineBuiltinMacro(MacroBuilder &Builder, StringRef Macro,
DiagnosticsEngine &Diags) {
std::pair<StringRef, StringRef> MacroPair = Macro.split('=');
StringRef MacroName = MacroPair.first;
StringRef MacroBody = MacroPair.second;
if (MacroName.size() != Macro.size()) {
// Per GCC -D semantics, the macro ends at \n if it exists.
StringRef::size_type End = MacroBody.find_first_of("\n\r");
if (End != StringRef::npos)
Diags.Report(diag::warn_fe_macro_contains_embedded_newline)
<< MacroName;
MacroBody = MacroBody.substr(0, End);
// We handle macro bodies which end in a backslash by appending an extra
// backslash+newline. This makes sure we don't accidentally treat the
// backslash as a line continuation marker.
if (MacroBodyEndsInBackslash(MacroBody))
Builder.defineMacro(MacroName, Twine(MacroBody) + "\\\n");
else
Builder.defineMacro(MacroName, MacroBody);
} else {
// Push "macroname 1".
Builder.defineMacro(Macro);
}
}
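// Worked examples of the -D semantics implemented above (sketch):
//   "DEBUG"      -> #define DEBUG 1
//   "DEPTH=16"   -> #define DEPTH 16
//   "EMPTY="     -> #define EMPTY          (defined, but with no value)
//   "A=1\n2"     -> warning; the body is truncated at the newline, so A is 1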
/// AddImplicitInclude - Add an implicit \#include of the specified file to the
/// predefines buffer.
/// As these includes are generated by -include arguments, the header search
/// logic will search relative to the current working directory.
static void AddImplicitInclude(MacroBuilder &Builder, StringRef File) {
Builder.append(Twine("#include \"") + File + "\"");
}
static void AddImplicitIncludeMacros(MacroBuilder &Builder, StringRef File) {
Builder.append(Twine("#__include_macros \"") + File + "\"");
// Marker token to stop the __include_macros fetch loop.
Builder.append("##"); // ##?
}
/// AddImplicitIncludePTH - Add an implicit \#include using the original file
/// used to generate a PTH cache.
static void AddImplicitIncludePTH(MacroBuilder &Builder, Preprocessor &PP,
StringRef ImplicitIncludePTH) {
PTHManager *P = PP.getPTHManager();
// Null check 'P' in the corner case where it couldn't be created.
const char *OriginalFile = P ? P->getOriginalSourceFile() : nullptr;
if (!OriginalFile) {
PP.getDiagnostics().Report(diag::err_fe_pth_file_has_no_source_header)
<< ImplicitIncludePTH;
return;
}
AddImplicitInclude(Builder, OriginalFile);
}
#if 0 // HLSL Change Starts - no support for AST serialization
/// \brief Add an implicit \#include using the original file used to generate
/// a PCH file.
static void AddImplicitIncludePCH(MacroBuilder &Builder, Preprocessor &PP,
const PCHContainerReader &PCHContainerRdr,
StringRef ImplicitIncludePCH) {
std::string OriginalFile =
ASTReader::getOriginalSourceFile(ImplicitIncludePCH, PP.getFileManager(),
PCHContainerRdr, PP.getDiagnostics());
if (OriginalFile.empty())
return;
AddImplicitInclude(Builder, OriginalFile);
}
#endif // HLSL Change End - no support for AST serialization
/// PickFP - This is used to pick a value based on the FP semantics of the
/// specified FP model.
template <typename T>
static T PickFP(const llvm::fltSemantics *Sem, T IEEESingleVal,
T IEEEDoubleVal, T X87DoubleExtendedVal, T PPCDoubleDoubleVal,
T IEEEQuadVal) {
if (Sem == (const llvm::fltSemantics*)&llvm::APFloat::IEEEsingle)
return IEEESingleVal;
if (Sem == (const llvm::fltSemantics*)&llvm::APFloat::IEEEdouble)
return IEEEDoubleVal;
if (Sem == (const llvm::fltSemantics*)&llvm::APFloat::x87DoubleExtended)
return X87DoubleExtendedVal;
if (Sem == (const llvm::fltSemantics*)&llvm::APFloat::PPCDoubleDouble)
return PPCDoubleDoubleVal;
assert(Sem == (const llvm::fltSemantics*)&llvm::APFloat::IEEEquad);
return IEEEQuadVal;
}
static void DefineFloatMacros(MacroBuilder &Builder, StringRef Prefix,
const llvm::fltSemantics *Sem, StringRef Ext) {
const char *DenormMin, *Epsilon, *Max, *Min;
DenormMin = PickFP(Sem, "1.40129846e-45", "4.9406564584124654e-324",
"3.64519953188247460253e-4951",
"4.94065645841246544176568792868221e-324",
"6.47517511943802511092443895822764655e-4966");
int Digits = PickFP(Sem, 6, 15, 18, 31, 33);
int DecimalDigits = PickFP(Sem, 9, 17, 21, 33, 36);
Epsilon = PickFP(Sem, "1.19209290e-7", "2.2204460492503131e-16",
"1.08420217248550443401e-19",
"4.94065645841246544176568792868221e-324",
"1.92592994438723585305597794258492732e-34");
int MantissaDigits = PickFP(Sem, 24, 53, 64, 106, 113);
int Min10Exp = PickFP(Sem, -37, -307, -4931, -291, -4931);
int Max10Exp = PickFP(Sem, 38, 308, 4932, 308, 4932);
int MinExp = PickFP(Sem, -125, -1021, -16381, -968, -16381);
int MaxExp = PickFP(Sem, 128, 1024, 16384, 1024, 16384);
Min = PickFP(Sem, "1.17549435e-38", "2.2250738585072014e-308",
"3.36210314311209350626e-4932",
"2.00416836000897277799610805135016e-292",
"3.36210314311209350626267781732175260e-4932");
Max = PickFP(Sem, "3.40282347e+38", "1.7976931348623157e+308",
"1.18973149535723176502e+4932",
"1.79769313486231580793728971405301e+308",
"1.18973149535723176508575932662800702e+4932");
SmallString<32> DefPrefix;
DefPrefix = "__";
DefPrefix += Prefix;
DefPrefix += "_";
Builder.defineMacro(DefPrefix + "DENORM_MIN__", Twine(DenormMin)+Ext);
Builder.defineMacro(DefPrefix + "HAS_DENORM__");
Builder.defineMacro(DefPrefix + "DIG__", Twine(Digits));
Builder.defineMacro(DefPrefix + "DECIMAL_DIG__", Twine(DecimalDigits));
Builder.defineMacro(DefPrefix + "EPSILON__", Twine(Epsilon)+Ext);
Builder.defineMacro(DefPrefix + "HAS_INFINITY__");
Builder.defineMacro(DefPrefix + "HAS_QUIET_NAN__");
Builder.defineMacro(DefPrefix + "MANT_DIG__", Twine(MantissaDigits));
Builder.defineMacro(DefPrefix + "MAX_10_EXP__", Twine(Max10Exp));
Builder.defineMacro(DefPrefix + "MAX_EXP__", Twine(MaxExp));
Builder.defineMacro(DefPrefix + "MAX__", Twine(Max)+Ext);
Builder.defineMacro(DefPrefix + "MIN_10_EXP__","("+Twine(Min10Exp)+")");
Builder.defineMacro(DefPrefix + "MIN_EXP__", "("+Twine(MinExp)+")");
Builder.defineMacro(DefPrefix + "MIN__", Twine(Min)+Ext);
}
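// Illustrative expansion, assuming Prefix "FLT", IEEE-single semantics, and
// Ext "F": the calls above emit predefines such as
//   #define __FLT_DIG__ 6
//   #define __FLT_MANT_DIG__ 24
//   #define __FLT_MAX__ 3.40282347e+38F
//   #define __FLT_MIN_EXP__ (-125)
// with the values drawn from the IEEESingleVal column of the PickFP calls.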
/// DefineTypeSize - Emit a macro to the predefines buffer that declares a macro
/// named MacroName with the max value for a type with width 'TypeWidth' a
/// signedness of 'isSigned' and with a value suffix of 'ValSuffix' (e.g. LL).
static void DefineTypeSize(const Twine &MacroName, unsigned TypeWidth,
StringRef ValSuffix, bool isSigned,
MacroBuilder &Builder) {
llvm::APInt MaxVal = isSigned ? llvm::APInt::getSignedMaxValue(TypeWidth)
: llvm::APInt::getMaxValue(TypeWidth);
Builder.defineMacro(MacroName, MaxVal.toString(10, isSigned) + ValSuffix);
}
/// DefineTypeSize - An overloaded helper that uses TargetInfo to determine
/// the width, suffix, and signedness of the given type
static void DefineTypeSize(const Twine &MacroName, TargetInfo::IntType Ty,
const TargetInfo &TI, MacroBuilder &Builder) {
DefineTypeSize(MacroName, TI.getTypeWidth(Ty), TI.getTypeConstantSuffix(Ty),
TI.isTypeSigned(Ty), Builder);
}
static void DefineFmt(const Twine &Prefix, TargetInfo::IntType Ty,
const TargetInfo &TI, MacroBuilder &Builder) {
bool IsSigned = TI.isTypeSigned(Ty);
StringRef FmtModifier = TI.getTypeFormatModifier(Ty);
for (const char *Fmt = IsSigned ? "di" : "ouxX"; *Fmt; ++Fmt) {
Builder.defineMacro(Prefix + "_FMT" + Twine(*Fmt) + "__",
Twine("\"") + FmtModifier + Twine(*Fmt) + "\"");
}
}
static void DefineType(const Twine &MacroName, TargetInfo::IntType Ty,
MacroBuilder &Builder) {
Builder.defineMacro(MacroName, TargetInfo::getTypeName(Ty));
}
static void DefineTypeWidth(StringRef MacroName, TargetInfo::IntType Ty,
const TargetInfo &TI, MacroBuilder &Builder) {
Builder.defineMacro(MacroName, Twine(TI.getTypeWidth(Ty)));
}
static void DefineTypeSizeof(StringRef MacroName, unsigned BitWidth,
const TargetInfo &TI, MacroBuilder &Builder) {
Builder.defineMacro(MacroName,
Twine(BitWidth / TI.getCharWidth()));
}
static void DefineExactWidthIntType(TargetInfo::IntType Ty,
const TargetInfo &TI,
MacroBuilder &Builder) {
int TypeWidth = TI.getTypeWidth(Ty);
bool IsSigned = TI.isTypeSigned(Ty);
// Use the target specified int64 type, when appropriate, so that [u]int64_t
// ends up being defined in terms of the correct type.
if (TypeWidth == 64)
Ty = IsSigned ? TI.getInt64Type() : TI.getUInt64Type();
const char *Prefix = IsSigned ? "__INT" : "__UINT";
DefineType(Prefix + Twine(TypeWidth) + "_TYPE__", Ty, Builder);
DefineFmt(Prefix + Twine(TypeWidth), Ty, TI, Builder);
StringRef ConstSuffix(TI.getTypeConstantSuffix(Ty));
Builder.defineMacro(Prefix + Twine(TypeWidth) + "_C_SUFFIX__", ConstSuffix);
}
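// For a 32-bit signed int this defines __INT32_TYPE__, the __INT32_FMT[di]__
// format macros, and __INT32_C_SUFFIX__, which a stdint.h implementation can
// use to build int32_t and the INT32_C() literal macro.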
static void DefineExactWidthIntTypeSize(TargetInfo::IntType Ty,
const TargetInfo &TI,
MacroBuilder &Builder) {
int TypeWidth = TI.getTypeWidth(Ty);
bool IsSigned = TI.isTypeSigned(Ty);
// Use the target specified int64 type, when appropriate, so that [u]int64_t
// ends up being defined in terms of the correct type.
if (TypeWidth == 64)
Ty = IsSigned ? TI.getInt64Type() : TI.getUInt64Type();
const char *Prefix = IsSigned ? "__INT" : "__UINT";
DefineTypeSize(Prefix + Twine(TypeWidth) + "_MAX__", Ty, TI, Builder);
}
static void DefineLeastWidthIntType(unsigned TypeWidth, bool IsSigned,
const TargetInfo &TI,
MacroBuilder &Builder) {
TargetInfo::IntType Ty = TI.getLeastIntTypeByWidth(TypeWidth, IsSigned);
if (Ty == TargetInfo::NoInt)
return;
const char *Prefix = IsSigned ? "__INT_LEAST" : "__UINT_LEAST";
DefineType(Prefix + Twine(TypeWidth) + "_TYPE__", Ty, Builder);
DefineTypeSize(Prefix + Twine(TypeWidth) + "_MAX__", Ty, TI, Builder);
DefineFmt(Prefix + Twine(TypeWidth), Ty, TI, Builder);
}
static void DefineFastIntType(unsigned TypeWidth, bool IsSigned,
const TargetInfo &TI, MacroBuilder &Builder) {
// stdint.h currently defines the fast int types as equivalent to the least
// types.
TargetInfo::IntType Ty = TI.getLeastIntTypeByWidth(TypeWidth, IsSigned);
if (Ty == TargetInfo::NoInt)
return;
const char *Prefix = IsSigned ? "__INT_FAST" : "__UINT_FAST";
DefineType(Prefix + Twine(TypeWidth) + "_TYPE__", Ty, Builder);
DefineTypeSize(Prefix + Twine(TypeWidth) + "_MAX__", Ty, TI, Builder);
DefineFmt(Prefix + Twine(TypeWidth), Ty, TI, Builder);
}
/// Get the value the ATOMIC_*_LOCK_FREE macro should have for a type with
/// the specified properties.
static const char *getLockFreeValue(unsigned TypeWidth, unsigned TypeAlign,
unsigned InlineWidth) {
// Fully-aligned, power-of-2 sizes no larger than the inline
// width will be inlined as lock-free operations.
if (TypeWidth == TypeAlign && (TypeWidth & (TypeWidth - 1)) == 0 &&
TypeWidth <= InlineWidth)
return "2"; // "always lock free"
// We cannot be certain what operations the lib calls might be
// able to implement as lock-free on future processors.
return "1"; // "sometimes lock free"
}
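// For instance, with a 64-bit inline atomic width, a naturally aligned
// 32-bit type gives getLockFreeValue(32, 32, 64) == "2", while a 128-bit
// type with only 64-bit alignment gives "1".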
/// \brief Add definitions required for a smooth interaction between
/// Objective-C++ automated reference counting and libstdc++ (4.2).
static void AddObjCXXARCLibstdcxxDefines(const LangOptions &LangOpts,
MacroBuilder &Builder) {
Builder.defineMacro("_GLIBCXX_PREDEFINED_OBJC_ARC_IS_SCALAR");
std::string Result;
{
// Provide specializations for the __is_scalar type trait so that
// lifetime-qualified objects are not considered "scalar" types, which
// libstdc++ uses as an indicator of the presence of trivial copy, assign,
// default-construct, and destruct semantics (none of which hold for
// lifetime-qualified objects in ARC).
llvm::raw_string_ostream Out(Result);
Out << "namespace std {\n"
<< "\n"
<< "struct __true_type;\n"
<< "struct __false_type;\n"
<< "\n";
Out << "template<typename _Tp> struct __is_scalar;\n"
<< "\n";
Out << "template<typename _Tp>\n"
<< "struct __is_scalar<__attribute__((objc_ownership(strong))) _Tp> {\n"
<< " enum { __value = 0 };\n"
<< " typedef __false_type __type;\n"
<< "};\n"
<< "\n";
if (LangOpts.ObjCARCWeak) {
Out << "template<typename _Tp>\n"
<< "struct __is_scalar<__attribute__((objc_ownership(weak))) _Tp> {\n"
<< " enum { __value = 0 };\n"
<< " typedef __false_type __type;\n"
<< "};\n"
<< "\n";
}
Out << "template<typename _Tp>\n"
<< "struct __is_scalar<__attribute__((objc_ownership(autoreleasing)))"
<< " _Tp> {\n"
<< " enum { __value = 0 };\n"
<< " typedef __false_type __type;\n"
<< "};\n"
<< "\n";
Out << "}\n";
}
Builder.append(Result);
}
#define STRINGIFY_INNER(str) #str
#define STRINGIFY(str) STRINGIFY_INNER(str)
static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
const LangOptions &LangOpts,
const FrontendOptions &FEOpts,
MacroBuilder &Builder) {
#if 1 // HLSL Change Starts
if (LangOpts.HLSL) {
Builder.defineMacro("__hlsl_dx_compiler");
// DXC Version information
Builder.defineMacro("__DXC_VERSION_MAJOR", STRINGIFY(RC_VERSION_FIELD_1));
Builder.defineMacro("__DXC_VERSION_MINOR", STRINGIFY(RC_VERSION_FIELD_2));
Builder.defineMacro("__DXC_VERSION_RELEASE", STRINGIFY(RC_VERSION_FIELD_3));
Builder.defineMacro("__DXC_VERSION_COMMITS", STRINGIFY(RC_VERSION_FIELD_4));
// HLSL Version
Builder.defineMacro("__HLSL_VERSION",
Twine((unsigned int)LangOpts.HLSLVersion));
// This define is enabled in Clang and allows conditionally compiling code
// based on whether or not native 16-bit types are supported.
if (!LangOpts.UseMinPrecision)
Builder.defineMacro("__HLSL_ENABLE_16_BIT", "1");
// Shader target information
// "enums" for shader stages
Builder.defineMacro("__SHADER_STAGE_VERTEX", Twine((unsigned)hlsl::DXIL::ShaderKind::Vertex));
Builder.defineMacro("__SHADER_STAGE_PIXEL", Twine((unsigned)hlsl::DXIL::ShaderKind::Pixel));
Builder.defineMacro("__SHADER_STAGE_GEOMETRY",Twine((unsigned)hlsl::DXIL::ShaderKind::Geometry));
Builder.defineMacro("__SHADER_STAGE_HULL", Twine((unsigned)hlsl::DXIL::ShaderKind::Hull));
Builder.defineMacro("__SHADER_STAGE_DOMAIN", Twine((unsigned)hlsl::DXIL::ShaderKind::Domain));
Builder.defineMacro("__SHADER_STAGE_COMPUTE", Twine((unsigned)hlsl::DXIL::ShaderKind::Compute));
Builder.defineMacro("__SHADER_STAGE_AMPLIFICATION",Twine((unsigned)hlsl::DXIL::ShaderKind::Amplification));
Builder.defineMacro("__SHADER_STAGE_MESH", Twine((unsigned)hlsl::DXIL::ShaderKind::Mesh));
Builder.defineMacro("__SHADER_STAGE_LIBRARY", Twine((unsigned)hlsl::DXIL::ShaderKind::Library));
const hlsl::ShaderModel *SM = hlsl::ShaderModel::GetByName(LangOpts.HLSLProfile.c_str());
// The current shader stage itself
Builder.defineMacro("__SHADER_TARGET_STAGE", Twine((unsigned)SM->GetKind()));
// Add target versions
Builder.defineMacro("__SHADER_TARGET_MAJOR", Twine(SM->GetMajor()));
Builder.defineMacro("__SHADER_TARGET_MINOR", Twine(SM->GetMinor()));
// SPIRV Change Starts
#ifdef ENABLE_SPIRV_CODEGEN
if (LangOpts.SPIRV) {
Builder.defineMacro("__spirv__");
Builder.defineMacro("__SPIRV_MAJOR_VERSION__",
Twine(LangOpts.SpirvMajorVersion));
Builder.defineMacro("__SPIRV_MINOR_VERSION__",
Twine(LangOpts.SpirvMinorVersion));
}
#endif // ENABLE_SPIRV_CODEGEN
// SPIRV Change Ends
}
return;
#else
if (!LangOpts.MSVCCompat && !LangOpts.TraditionalCPP)
Builder.defineMacro("__STDC__");
if (LangOpts.Freestanding)
Builder.defineMacro("__STDC_HOSTED__", "0");
else
Builder.defineMacro("__STDC_HOSTED__");
if (!LangOpts.CPlusPlus) {
if (LangOpts.C11)
Builder.defineMacro("__STDC_VERSION__", "201112L");
else if (LangOpts.C99)
Builder.defineMacro("__STDC_VERSION__", "199901L");
else if (!LangOpts.GNUMode && LangOpts.Digraphs)
Builder.defineMacro("__STDC_VERSION__", "199409L");
} else {
// FIXME: Use correct value for C++17.
if (LangOpts.CPlusPlus1z)
Builder.defineMacro("__cplusplus", "201406L");
// C++1y [cpp.predefined]p1:
// The name __cplusplus is defined to the value 201402L when compiling a
// C++ translation unit.
else if (LangOpts.CPlusPlus14)
Builder.defineMacro("__cplusplus", "201402L");
// C++11 [cpp.predefined]p1:
// The name __cplusplus is defined to the value 201103L when compiling a
// C++ translation unit.
else if (LangOpts.CPlusPlus11)
Builder.defineMacro("__cplusplus", "201103L");
// C++03 [cpp.predefined]p1:
// The name __cplusplus is defined to the value 199711L when compiling a
// C++ translation unit.
else
Builder.defineMacro("__cplusplus", "199711L");
}
// In C11 these are environment macros. In C++11 they are only defined
// as part of <cuchar>. To prevent breakage when mixing C and C++
// code, define these macros unconditionally. We can define them
// unconditionally, as Clang always uses UTF-16 and UTF-32 for 16-bit
// and 32-bit character literals.
Builder.defineMacro("__STDC_UTF_16__", "1");
Builder.defineMacro("__STDC_UTF_32__", "1");
if (LangOpts.ObjC1)
Builder.defineMacro("__OBJC__");
// Not "standard" per se, but available even with the -undef flag.
if (LangOpts.AsmPreprocessor)
Builder.defineMacro("__ASSEMBLER__");
#endif // HLSL Change Ends
}
/// Initialize the predefined C++ language feature test macros defined in
/// ISO/IEC JTC1/SC22/WG21 (C++) SD-6: "SG10 Feature Test Recommendations".
static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
MacroBuilder &Builder) {
// C++98 features.
if (LangOpts.RTTI)
Builder.defineMacro("__cpp_rtti", "199711");
if (LangOpts.CXXExceptions)
Builder.defineMacro("__cpp_exceptions", "199711");
// C++11 features.
if (LangOpts.CPlusPlus11) {
Builder.defineMacro("__cpp_unicode_characters", "200704");
Builder.defineMacro("__cpp_raw_strings", "200710");
Builder.defineMacro("__cpp_unicode_literals", "200710");
Builder.defineMacro("__cpp_user_defined_literals", "200809");
Builder.defineMacro("__cpp_lambdas", "200907");
Builder.defineMacro("__cpp_constexpr",
LangOpts.CPlusPlus14 ? "201304" : "200704");
Builder.defineMacro("__cpp_range_based_for", "200907");
Builder.defineMacro("__cpp_static_assert", "200410");
Builder.defineMacro("__cpp_decltype", "200707");
Builder.defineMacro("__cpp_attributes", "200809");
Builder.defineMacro("__cpp_rvalue_references", "200610");
Builder.defineMacro("__cpp_variadic_templates", "200704");
Builder.defineMacro("__cpp_initializer_lists", "200806");
Builder.defineMacro("__cpp_delegating_constructors", "200604");
Builder.defineMacro("__cpp_nsdmi", "200809");
Builder.defineMacro("__cpp_inheriting_constructors", "200802");
Builder.defineMacro("__cpp_ref_qualifiers", "200710");
Builder.defineMacro("__cpp_alias_templates", "200704");
}
// C++14 features.
if (LangOpts.CPlusPlus14) {
Builder.defineMacro("__cpp_binary_literals", "201304");
Builder.defineMacro("__cpp_digit_separators", "201309");
Builder.defineMacro("__cpp_init_captures", "201304");
Builder.defineMacro("__cpp_generic_lambdas", "201304");
Builder.defineMacro("__cpp_decltype_auto", "201304");
Builder.defineMacro("__cpp_return_type_deduction", "201304");
Builder.defineMacro("__cpp_aggregate_nsdmi", "201304");
Builder.defineMacro("__cpp_variable_templates", "201304");
}
if (LangOpts.SizedDeallocation)
Builder.defineMacro("__cpp_sized_deallocation", "201309");
if (LangOpts.ConceptsTS)
Builder.defineMacro("__cpp_experimental_concepts", "1");
}
static void InitializePredefinedMacros(const TargetInfo &TI,
const LangOptions &LangOpts,
const FrontendOptions &FEOpts,
MacroBuilder &Builder) {
// Compiler version introspection macros.
Builder.defineMacro("__llvm__"); // LLVM Backend
Builder.defineMacro("__clang__"); // Clang Frontend
#define TOSTR2(X) #X
#define TOSTR(X) TOSTR2(X)
Builder.defineMacro("__clang_major__", TOSTR(CLANG_VERSION_MAJOR));
Builder.defineMacro("__clang_minor__", TOSTR(CLANG_VERSION_MINOR));
#ifdef CLANG_VERSION_PATCHLEVEL
Builder.defineMacro("__clang_patchlevel__", TOSTR(CLANG_VERSION_PATCHLEVEL));
#else
Builder.defineMacro("__clang_patchlevel__", "0");
#endif
Builder.defineMacro("__clang_version__",
"\"" CLANG_VERSION_STRING " "
+ getClangFullRepositoryVersion() + "\"");
#undef TOSTR
#undef TOSTR2
if (!LangOpts.MSVCCompat) {
// Currently claim to be compatible with GCC 4.2.1-5621, but only if we're
// not compiling for MSVC compatibility
Builder.defineMacro("__GNUC_MINOR__", "2");
Builder.defineMacro("__GNUC_PATCHLEVEL__", "1");
Builder.defineMacro("__GNUC__", "4");
Builder.defineMacro("__GXX_ABI_VERSION", "1002");
}
// Define macros for the C11 / C++11 memory orderings
if (!LangOpts.HLSL) { // HLSL Change - don't include for HLSL
Builder.defineMacro("__ATOMIC_RELAXED", "0");
Builder.defineMacro("__ATOMIC_CONSUME", "1");
Builder.defineMacro("__ATOMIC_ACQUIRE", "2");
Builder.defineMacro("__ATOMIC_RELEASE", "3");
Builder.defineMacro("__ATOMIC_ACQ_REL", "4");
Builder.defineMacro("__ATOMIC_SEQ_CST", "5");
// Support for #pragma redefine_extname (Sun compatibility)
Builder.defineMacro("__PRAGMA_REDEFINE_EXTNAME", "1");
} // HLSL Change - don't include for HLSL
// As sad as it is, enough software depends on __VERSION__ for version
// checks that it is necessary to report 4.2.1 (the base GCC version we claim
// compatibility with) first.
Builder.defineMacro("__VERSION__", "\"4.2.1 Compatible " +
Twine(getClangFullCPPVersion()) + "\"");
// Initialize language-specific preprocessor defines.
// Standard conforming mode?
if (!LangOpts.HLSL) { // HLSL Change - don't include for HLSL
if (!LangOpts.GNUMode && !LangOpts.MSVCCompat)
Builder.defineMacro("__STRICT_ANSI__");
if (!LangOpts.MSVCCompat && LangOpts.CPlusPlus11)
Builder.defineMacro("__GXX_EXPERIMENTAL_CXX0X__");
if (LangOpts.ObjC1) {
if (LangOpts.ObjCRuntime.isNonFragile()) {
Builder.defineMacro("__OBJC2__");
if (LangOpts.ObjCExceptions)
Builder.defineMacro("OBJC_ZEROCOST_EXCEPTIONS");
}
if (LangOpts.getGC() != LangOptions::NonGC)
Builder.defineMacro("__OBJC_GC__");
if (LangOpts.ObjCRuntime.isNeXTFamily())
Builder.defineMacro("__NEXT_RUNTIME__");
if (LangOpts.ObjCRuntime.getKind() == ObjCRuntime::ObjFW) {
VersionTuple tuple = LangOpts.ObjCRuntime.getVersion();
unsigned minor = 0;
if (tuple.getMinor().hasValue())
minor = tuple.getMinor().getValue();
unsigned subminor = 0;
if (tuple.getSubminor().hasValue())
subminor = tuple.getSubminor().getValue();
Builder.defineMacro("__OBJFW_RUNTIME_ABI__",
Twine(tuple.getMajor() * 10000 + minor * 100 +
subminor));
}
Builder.defineMacro("IBOutlet", "__attribute__((iboutlet))");
Builder.defineMacro("IBOutletCollection(ClassName)",
"__attribute__((iboutletcollection(ClassName)))");
Builder.defineMacro("IBAction", "void)__attribute__((ibaction)");
Builder.defineMacro("IBInspectable", "");
Builder.defineMacro("IB_DESIGNABLE", "");
}
if (LangOpts.CPlusPlus)
InitializeCPlusPlusFeatureTestMacros(LangOpts, Builder);
// darwin_constant_cfstrings controls this. This is also dependent
// on other things, like the runtime, I believe. This is set even for C code.
if (!LangOpts.NoConstantCFStrings)
Builder.defineMacro("__CONSTANT_CFSTRINGS__");
if (LangOpts.ObjC2)
Builder.defineMacro("OBJC_NEW_PROPERTIES");
if (LangOpts.PascalStrings)
Builder.defineMacro("__PASCAL_STRINGS__");
if (LangOpts.Blocks) {
Builder.defineMacro("__block", "__attribute__((__blocks__(byref)))");
Builder.defineMacro("__BLOCKS__");
}
if (!LangOpts.MSVCCompat && LangOpts.Exceptions)
Builder.defineMacro("__EXCEPTIONS");
if (!LangOpts.MSVCCompat && LangOpts.RTTI)
Builder.defineMacro("__GXX_RTTI");
if (LangOpts.SjLjExceptions)
Builder.defineMacro("__USING_SJLJ_EXCEPTIONS__");
if (LangOpts.Deprecated)
Builder.defineMacro("__DEPRECATED");
if (!LangOpts.MSVCCompat && LangOpts.CPlusPlus) {
Builder.defineMacro("__GNUG__", "4");
Builder.defineMacro("__GXX_WEAK__");
Builder.defineMacro("__private_extern__", "extern");
}
if (LangOpts.MicrosoftExt) {
if (LangOpts.WChar) {
// wchar_t supported as a keyword.
Builder.defineMacro("_WCHAR_T_DEFINED");
Builder.defineMacro("_NATIVE_WCHAR_T_DEFINED");
}
}
if (LangOpts.Optimize)
Builder.defineMacro("__OPTIMIZE__");
if (LangOpts.OptimizeSize)
Builder.defineMacro("__OPTIMIZE_SIZE__");
} // HLSL Change - don't include for HLSL
if (LangOpts.FastMath)
Builder.defineMacro("__FAST_MATH__");
// Initialize target-specific preprocessor defines.
// __BYTE_ORDER__ was added in GCC 4.6. It's analogous
// to the macro __BYTE_ORDER (no trailing underscores)
// from glibc's <endian.h> header.
// We don't support the PDP-11 as a target, but include
// the define so it can still be compared against.
Builder.defineMacro("__ORDER_LITTLE_ENDIAN__", "1234");
Builder.defineMacro("__ORDER_BIG_ENDIAN__", "4321");
Builder.defineMacro("__ORDER_PDP_ENDIAN__", "3412");
if (TI.isBigEndian()) {
Builder.defineMacro("__BYTE_ORDER__", "__ORDER_BIG_ENDIAN__");
Builder.defineMacro("__BIG_ENDIAN__");
} else {
Builder.defineMacro("__BYTE_ORDER__", "__ORDER_LITTLE_ENDIAN__");
Builder.defineMacro("__LITTLE_ENDIAN__");
}
if (!LangOpts.HLSL) { // HLSL Change - don't include for HLSL
if (TI.getPointerWidth(0) == 64 && TI.getLongWidth() == 64
&& TI.getIntWidth() == 32) {
Builder.defineMacro("_LP64");
Builder.defineMacro("__LP64__");
}
if (TI.getPointerWidth(0) == 32 && TI.getLongWidth() == 32
&& TI.getIntWidth() == 32) {
Builder.defineMacro("_ILP32");
Builder.defineMacro("__ILP32__");
}
// Define type sizing macros based on the target properties.
assert(TI.getCharWidth() == 8 && "Only support 8-bit char so far");
Builder.defineMacro("__CHAR_BIT__", "8");
DefineTypeSize("__SCHAR_MAX__", TargetInfo::SignedChar, TI, Builder);
DefineTypeSize("__SHRT_MAX__", TargetInfo::SignedShort, TI, Builder);
DefineTypeSize("__INT_MAX__", TargetInfo::SignedInt, TI, Builder);
DefineTypeSize("__LONG_MAX__", TargetInfo::SignedLong, TI, Builder);
DefineTypeSize("__LONG_LONG_MAX__", TargetInfo::SignedLongLong, TI, Builder);
DefineTypeSize("__WCHAR_MAX__", TI.getWCharType(), TI, Builder);
DefineTypeSize("__INTMAX_MAX__", TI.getIntMaxType(), TI, Builder);
DefineTypeSize("__SIZE_MAX__", TI.getSizeType(), TI, Builder);
DefineTypeSize("__UINTMAX_MAX__", TI.getUIntMaxType(), TI, Builder);
DefineTypeSize("__PTRDIFF_MAX__", TI.getPtrDiffType(0), TI, Builder);
DefineTypeSize("__INTPTR_MAX__", TI.getIntPtrType(), TI, Builder);
DefineTypeSize("__UINTPTR_MAX__", TI.getUIntPtrType(), TI, Builder);
DefineTypeSizeof("__SIZEOF_DOUBLE__", TI.getDoubleWidth(), TI, Builder);
DefineTypeSizeof("__SIZEOF_FLOAT__", TI.getFloatWidth(), TI, Builder);
DefineTypeSizeof("__SIZEOF_INT__", TI.getIntWidth(), TI, Builder);
DefineTypeSizeof("__SIZEOF_LONG__", TI.getLongWidth(), TI, Builder);
DefineTypeSizeof("__SIZEOF_LONG_DOUBLE__",TI.getLongDoubleWidth(),TI,Builder);
DefineTypeSizeof("__SIZEOF_LONG_LONG__", TI.getLongLongWidth(), TI, Builder);
DefineTypeSizeof("__SIZEOF_POINTER__", TI.getPointerWidth(0), TI, Builder);
DefineTypeSizeof("__SIZEOF_SHORT__", TI.getShortWidth(), TI, Builder);
DefineTypeSizeof("__SIZEOF_PTRDIFF_T__",
TI.getTypeWidth(TI.getPtrDiffType(0)), TI, Builder);
DefineTypeSizeof("__SIZEOF_SIZE_T__",
TI.getTypeWidth(TI.getSizeType()), TI, Builder);
DefineTypeSizeof("__SIZEOF_WCHAR_T__",
TI.getTypeWidth(TI.getWCharType()), TI, Builder);
DefineTypeSizeof("__SIZEOF_WINT_T__",
TI.getTypeWidth(TI.getWIntType()), TI, Builder);
if (TI.hasInt128Type())
DefineTypeSizeof("__SIZEOF_INT128__", 128, TI, Builder);
DefineType("__INTMAX_TYPE__", TI.getIntMaxType(), Builder);
DefineFmt("__INTMAX", TI.getIntMaxType(), TI, Builder);
Builder.defineMacro("__INTMAX_C_SUFFIX__",
TI.getTypeConstantSuffix(TI.getIntMaxType()));
DefineType("__UINTMAX_TYPE__", TI.getUIntMaxType(), Builder);
DefineFmt("__UINTMAX", TI.getUIntMaxType(), TI, Builder);
Builder.defineMacro("__UINTMAX_C_SUFFIX__",
TI.getTypeConstantSuffix(TI.getUIntMaxType()));
DefineTypeWidth("__INTMAX_WIDTH__", TI.getIntMaxType(), TI, Builder);
DefineType("__PTRDIFF_TYPE__", TI.getPtrDiffType(0), Builder);
DefineFmt("__PTRDIFF", TI.getPtrDiffType(0), TI, Builder);
DefineTypeWidth("__PTRDIFF_WIDTH__", TI.getPtrDiffType(0), TI, Builder);
DefineType("__INTPTR_TYPE__", TI.getIntPtrType(), Builder);
DefineFmt("__INTPTR", TI.getIntPtrType(), TI, Builder);
DefineTypeWidth("__INTPTR_WIDTH__", TI.getIntPtrType(), TI, Builder);
DefineType("__SIZE_TYPE__", TI.getSizeType(), Builder);
DefineFmt("__SIZE", TI.getSizeType(), TI, Builder);
DefineTypeWidth("__SIZE_WIDTH__", TI.getSizeType(), TI, Builder);
DefineType("__WCHAR_TYPE__", TI.getWCharType(), Builder);
DefineTypeWidth("__WCHAR_WIDTH__", TI.getWCharType(), TI, Builder);
DefineType("__WINT_TYPE__", TI.getWIntType(), Builder);
DefineTypeWidth("__WINT_WIDTH__", TI.getWIntType(), TI, Builder);
DefineTypeWidth("__SIG_ATOMIC_WIDTH__", TI.getSigAtomicType(), TI, Builder);
DefineTypeSize("__SIG_ATOMIC_MAX__", TI.getSigAtomicType(), TI, Builder);
DefineType("__CHAR16_TYPE__", TI.getChar16Type(), Builder);
DefineType("__CHAR32_TYPE__", TI.getChar32Type(), Builder);
DefineTypeWidth("__UINTMAX_WIDTH__", TI.getUIntMaxType(), TI, Builder);
DefineType("__UINTPTR_TYPE__", TI.getUIntPtrType(), Builder);
DefineFmt("__UINTPTR", TI.getUIntPtrType(), TI, Builder);
DefineTypeWidth("__UINTPTR_WIDTH__", TI.getUIntPtrType(), TI, Builder);
} // HLSL Change - don't include for HLSL
DefineFloatMacros(Builder, "FLT", &TI.getFloatFormat(), "F");
DefineFloatMacros(Builder, "DBL", &TI.getDoubleFormat(), "");
if (!LangOpts.HLSL) { // HLSL Change - don't include for HLSL
DefineFloatMacros(Builder, "LDBL", &TI.getLongDoubleFormat(), "L");
// Define a __POINTER_WIDTH__ macro for stdint.h.
Builder.defineMacro("__POINTER_WIDTH__",
Twine((int)TI.getPointerWidth(0)));
// Define __BIGGEST_ALIGNMENT__ to be compatible with gcc.
Builder.defineMacro("__BIGGEST_ALIGNMENT__",
Twine(TI.getSuitableAlign() / TI.getCharWidth()) );
if (!LangOpts.CharIsSigned)
Builder.defineMacro("__CHAR_UNSIGNED__");
if (!TargetInfo::isTypeSigned(TI.getWCharType()))
Builder.defineMacro("__WCHAR_UNSIGNED__");
if (!TargetInfo::isTypeSigned(TI.getWIntType()))
Builder.defineMacro("__WINT_UNSIGNED__");
// Define exact-width integer types for stdint.h
DefineExactWidthIntType(TargetInfo::SignedChar, TI, Builder);
if (TI.getShortWidth() > TI.getCharWidth())
DefineExactWidthIntType(TargetInfo::SignedShort, TI, Builder);
if (TI.getIntWidth() > TI.getShortWidth())
DefineExactWidthIntType(TargetInfo::SignedInt, TI, Builder);
if (TI.getLongWidth() > TI.getIntWidth())
DefineExactWidthIntType(TargetInfo::SignedLong, TI, Builder);
if (TI.getLongLongWidth() > TI.getLongWidth())
DefineExactWidthIntType(TargetInfo::SignedLongLong, TI, Builder);
DefineExactWidthIntType(TargetInfo::UnsignedChar, TI, Builder);
DefineExactWidthIntTypeSize(TargetInfo::UnsignedChar, TI, Builder);
DefineExactWidthIntTypeSize(TargetInfo::SignedChar, TI, Builder);
if (TI.getShortWidth() > TI.getCharWidth()) {
DefineExactWidthIntType(TargetInfo::UnsignedShort, TI, Builder);
DefineExactWidthIntTypeSize(TargetInfo::UnsignedShort, TI, Builder);
DefineExactWidthIntTypeSize(TargetInfo::SignedShort, TI, Builder);
}
if (TI.getIntWidth() > TI.getShortWidth()) {
DefineExactWidthIntType(TargetInfo::UnsignedInt, TI, Builder);
DefineExactWidthIntTypeSize(TargetInfo::UnsignedInt, TI, Builder);
DefineExactWidthIntTypeSize(TargetInfo::SignedInt, TI, Builder);
}
if (TI.getLongWidth() > TI.getIntWidth()) {
DefineExactWidthIntType(TargetInfo::UnsignedLong, TI, Builder);
DefineExactWidthIntTypeSize(TargetInfo::UnsignedLong, TI, Builder);
DefineExactWidthIntTypeSize(TargetInfo::SignedLong, TI, Builder);
}
if (TI.getLongLongWidth() > TI.getLongWidth()) {
DefineExactWidthIntType(TargetInfo::UnsignedLongLong, TI, Builder);
DefineExactWidthIntTypeSize(TargetInfo::UnsignedLongLong, TI, Builder);
DefineExactWidthIntTypeSize(TargetInfo::SignedLongLong, TI, Builder);
}
DefineLeastWidthIntType(8, true, TI, Builder);
DefineLeastWidthIntType(8, false, TI, Builder);
DefineLeastWidthIntType(16, true, TI, Builder);
DefineLeastWidthIntType(16, false, TI, Builder);
DefineLeastWidthIntType(32, true, TI, Builder);
DefineLeastWidthIntType(32, false, TI, Builder);
DefineLeastWidthIntType(64, true, TI, Builder);
DefineLeastWidthIntType(64, false, TI, Builder);
DefineFastIntType(8, true, TI, Builder);
DefineFastIntType(8, false, TI, Builder);
DefineFastIntType(16, true, TI, Builder);
DefineFastIntType(16, false, TI, Builder);
DefineFastIntType(32, true, TI, Builder);
DefineFastIntType(32, false, TI, Builder);
DefineFastIntType(64, true, TI, Builder);
DefineFastIntType(64, false, TI, Builder);
if (const char *Prefix = TI.getUserLabelPrefix())
Builder.defineMacro("__USER_LABEL_PREFIX__", Prefix);
if (LangOpts.FastMath || LangOpts.FiniteMathOnly)
Builder.defineMacro("__FINITE_MATH_ONLY__", "1");
else
Builder.defineMacro("__FINITE_MATH_ONLY__", "0");
if (!LangOpts.MSVCCompat) {
if (LangOpts.GNUInline || LangOpts.CPlusPlus)
Builder.defineMacro("__GNUC_GNU_INLINE__");
else
Builder.defineMacro("__GNUC_STDC_INLINE__");
// The value written by __atomic_test_and_set.
// FIXME: This is target-dependent.
Builder.defineMacro("__GCC_ATOMIC_TEST_AND_SET_TRUEVAL", "1");
// Used by libstdc++ to implement ATOMIC_<foo>_LOCK_FREE.
unsigned InlineWidthBits = TI.getMaxAtomicInlineWidth();
#define DEFINE_LOCK_FREE_MACRO(TYPE, Type) \
Builder.defineMacro("__GCC_ATOMIC_" #TYPE "_LOCK_FREE", \
getLockFreeValue(TI.get##Type##Width(), \
TI.get##Type##Align(), \
InlineWidthBits));
DEFINE_LOCK_FREE_MACRO(BOOL, Bool);
DEFINE_LOCK_FREE_MACRO(CHAR, Char);
DEFINE_LOCK_FREE_MACRO(CHAR16_T, Char16);
DEFINE_LOCK_FREE_MACRO(CHAR32_T, Char32);
DEFINE_LOCK_FREE_MACRO(WCHAR_T, WChar);
DEFINE_LOCK_FREE_MACRO(SHORT, Short);
DEFINE_LOCK_FREE_MACRO(INT, Int);
DEFINE_LOCK_FREE_MACRO(LONG, Long);
DEFINE_LOCK_FREE_MACRO(LLONG, LongLong);
Builder.defineMacro("__GCC_ATOMIC_POINTER_LOCK_FREE",
getLockFreeValue(TI.getPointerWidth(0),
TI.getPointerAlign(0),
InlineWidthBits));
#undef DEFINE_LOCK_FREE_MACRO
}
if (LangOpts.NoInlineDefine)
Builder.defineMacro("__NO_INLINE__");
if (unsigned PICLevel = LangOpts.PICLevel) {
Builder.defineMacro("__PIC__", Twine(PICLevel));
Builder.defineMacro("__pic__", Twine(PICLevel));
}
if (unsigned PIELevel = LangOpts.PIELevel) {
Builder.defineMacro("__PIE__", Twine(PIELevel));
Builder.defineMacro("__pie__", Twine(PIELevel));
}
// Macros to control C99 numerics and <float.h>
Builder.defineMacro("__FLT_EVAL_METHOD__", Twine(TI.getFloatEvalMethod()));
} // HLSL Change - don't include for HLSL
Builder.defineMacro("__FLT_RADIX__", "2");
Builder.defineMacro("__DECIMAL_DIG__", "__LDBL_DECIMAL_DIG__");
if (!LangOpts.HLSL) { // HLSL Change - don't include for HLSL
if (LangOpts.getStackProtector() == LangOptions::SSPOn)
Builder.defineMacro("__SSP__");
else if (LangOpts.getStackProtector() == LangOptions::SSPStrong)
Builder.defineMacro("__SSP_STRONG__", "2");
else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
Builder.defineMacro("__SSP_ALL__", "3");
if (FEOpts.ProgramAction == frontend::RewriteObjC)
Builder.defineMacro("__weak", "__attribute__((objc_gc(weak)))");
// Define a macro that exists only when using the static analyzer.
if (FEOpts.ProgramAction == frontend::RunAnalysis)
Builder.defineMacro("__clang_analyzer__");
if (LangOpts.FastRelaxedMath)
Builder.defineMacro("__FAST_RELAXED_MATH__");
if (LangOpts.ObjCAutoRefCount) {
Builder.defineMacro("__weak", "__attribute__((objc_ownership(weak)))");
Builder.defineMacro("__strong", "__attribute__((objc_ownership(strong)))");
Builder.defineMacro("__autoreleasing",
"__attribute__((objc_ownership(autoreleasing)))");
Builder.defineMacro("__unsafe_unretained",
"__attribute__((objc_ownership(none)))");
}
// On Darwin, there are __double_underscored variants of the type
// nullability qualifiers.
if (TI.getTriple().isOSDarwin()) {
Builder.defineMacro("__nonnull", "_Nonnull");
Builder.defineMacro("__null_unspecified", "_Null_unspecified");
Builder.defineMacro("__nullable", "_Nullable");
}
// OpenMP definition
if (LangOpts.OpenMP) {
// OpenMP 2.2:
// In implementations that support a preprocessor, the _OPENMP
// macro name is defined to have the decimal value yyyymm where
// yyyy and mm are the year and the month designations of the
// version of the OpenMP API that the implementation supports.
Builder.defineMacro("_OPENMP", "201307");
}
// CUDA device path compilation
if (LangOpts.CUDAIsDevice) {
// The CUDA_ARCH value is set for the GPU target specified in the NVPTX
// backend's target defines.
Builder.defineMacro("__CUDA_ARCH__");
}
// Get other target #defines.
TI.getTargetDefines(LangOpts, Builder);
} // HLSL Change - don't include for HLSL
}
/// InitializePreprocessor - Initialize the preprocessor getting it and the
/// environment ready to process a single file. This returns true on error.
///
void clang::InitializePreprocessor(
Preprocessor &PP, const PreprocessorOptions &InitOpts,
const PCHContainerReader &PCHContainerRdr,
const FrontendOptions &FEOpts) {
const LangOptions &LangOpts = PP.getLangOpts();
std::string PredefineBuffer;
PredefineBuffer.reserve(4080);
llvm::raw_string_ostream Predefines(PredefineBuffer);
MacroBuilder Builder(Predefines);
// Emit line markers for various builtin sections of the file. We don't do
// this in asm preprocessor mode, because "# 4" is not a line marker directive
// in this mode.
if (!PP.getLangOpts().AsmPreprocessor && !PP.getLangOpts().HLSL) // HLSL Change - don't print built-ins
Builder.append("# 1 \"<built-in>\" 3");
// Install things like __POWERPC__, __GNUC__, etc into the macro table.
if (InitOpts.UsePredefines) {
InitializePredefinedMacros(PP.getTargetInfo(), LangOpts, FEOpts, Builder);
// Install definitions to make Objective-C++ ARC work well with various
// C++ Standard Library implementations.
if (LangOpts.ObjC1 && LangOpts.CPlusPlus && LangOpts.ObjCAutoRefCount) {
switch (InitOpts.ObjCXXARCStandardLibrary) {
case ARCXX_nolib:
case ARCXX_libcxx:
break;
case ARCXX_libstdcxx:
AddObjCXXARCLibstdcxxDefines(LangOpts, Builder);
break;
}
}
}
// Even with predefines off, some macros are still predefined.
// These should all be defined in the preprocessor according to the
// current language configuration.
InitializeStandardPredefinedMacros(PP.getTargetInfo(), PP.getLangOpts(),
FEOpts, Builder);
// Add on the predefines from the driver. Wrap in a #line directive to report
// that they come from the command line.
if (!PP.getLangOpts().AsmPreprocessor && !PP.getLangOpts().HLSL) // HLSL Change - don't print built-ins
Builder.append("# 1 \"<command line>\" 1");
// Process #define's and #undef's in the order they are given.
for (unsigned i = 0, e = InitOpts.Macros.size(); i != e; ++i) {
if (InitOpts.Macros[i].second) // isUndef
Builder.undefineMacro(InitOpts.Macros[i].first);
else
DefineBuiltinMacro(Builder, InitOpts.Macros[i].first,
PP.getDiagnostics());
}
// If -imacros are specified, include them now. These are processed before
// any -include directives.
for (unsigned i = 0, e = InitOpts.MacroIncludes.size(); i != e; ++i)
AddImplicitIncludeMacros(Builder, InitOpts.MacroIncludes[i]);
// Process -include-pch/-include-pth directives.
#if 0 // HLSL Change Starts - no support for AST serialization
if (!InitOpts.ImplicitPCHInclude.empty())
AddImplicitIncludePCH(Builder, PP, PCHContainerRdr,
InitOpts.ImplicitPCHInclude);
#endif // HLSL Change Ends - no support for AST serialization
if (!InitOpts.ImplicitPTHInclude.empty())
AddImplicitIncludePTH(Builder, PP, InitOpts.ImplicitPTHInclude);
// Process -include directives.
for (unsigned i = 0, e = InitOpts.Includes.size(); i != e; ++i) {
const std::string &Path = InitOpts.Includes[i];
AddImplicitInclude(Builder, Path);
}
// Exit the command line and go back to <built-in> (2 is LC_LEAVE).
if (!PP.getLangOpts().AsmPreprocessor && !PP.getLangOpts().HLSL) // HLSL Change - don't print built-ins
Builder.append("# 1 \"<built-in>\" 2");
// Instruct the preprocessor to skip the preamble.
PP.setSkipMainFilePreamble(InitOpts.PrecompiledPreambleBytes.first,
InitOpts.PrecompiledPreambleBytes.second);
// Copy PredefinedBuffer into the Preprocessor.
PP.setPredefines(Predefines.str());
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Frontend/CacheTokens.cpp | //===--- CacheTokens.cpp - Caching of lexer tokens for PTH support --------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides a possible implementation of PTH support for Clang that is
// based on caching lexed tokens and identifiers.
//
//===----------------------------------------------------------------------===//
#include "clang/Frontend/Utils.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/FileSystemStatCache.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/OnDiskHashTable.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
// FIXME: put this somewhere else?
#ifndef S_ISDIR
#define S_ISDIR(x) (((x)&_S_IFDIR)!=0)
#endif
using namespace clang;
//===----------------------------------------------------------------------===//
// PTH-specific stuff.
//===----------------------------------------------------------------------===//
typedef uint32_t Offset;
namespace {
class PTHEntry {
Offset TokenData, PPCondData;
public:
PTHEntry() {}
PTHEntry(Offset td, Offset ppcd)
: TokenData(td), PPCondData(ppcd) {}
Offset getTokenOffset() const { return TokenData; }
Offset getPPCondTableOffset() const { return PPCondData; }
};
class PTHEntryKeyVariant {
union { const FileEntry* FE; const char* Path; };
enum { IsFE = 0x1, IsDE = 0x2, IsNoExist = 0x0 } Kind;
FileData *Data;
public:
PTHEntryKeyVariant(const FileEntry *fe) : FE(fe), Kind(IsFE), Data(nullptr) {}
PTHEntryKeyVariant(FileData *Data, const char *path)
: Path(path), Kind(IsDE), Data(new FileData(*Data)) {}
explicit PTHEntryKeyVariant(const char *path)
: Path(path), Kind(IsNoExist), Data(nullptr) {}
bool isFile() const { return Kind == IsFE; }
StringRef getString() const {
return Kind == IsFE ? FE->getName() : Path;
}
unsigned getKind() const { return (unsigned) Kind; }
void EmitData(raw_ostream& Out) {
using namespace llvm::support;
endian::Writer<little> LE(Out);
switch (Kind) {
case IsFE: {
// Emit stat information.
llvm::sys::fs::UniqueID UID = FE->getUniqueID();
LE.write<uint64_t>(UID.getFile());
LE.write<uint64_t>(UID.getDevice());
LE.write<uint64_t>(FE->getModificationTime());
LE.write<uint64_t>(FE->getSize());
} break;
case IsDE:
// Emit stat information.
LE.write<uint64_t>(Data->UniqueID.getFile());
LE.write<uint64_t>(Data->UniqueID.getDevice());
LE.write<uint64_t>(Data->ModTime);
LE.write<uint64_t>(Data->Size);
delete Data;
break;
default:
break;
}
}
unsigned getRepresentationLength() const {
return Kind == IsNoExist ? 0 : 4 * 8; // four uint64_t stat fields, as emitted by EmitData
}
};
class FileEntryPTHEntryInfo {
public:
typedef PTHEntryKeyVariant key_type;
typedef key_type key_type_ref;
typedef PTHEntry data_type;
typedef const PTHEntry& data_type_ref;
typedef unsigned hash_value_type;
typedef unsigned offset_type;
static hash_value_type ComputeHash(PTHEntryKeyVariant V) {
return llvm::HashString(V.getString());
}
static std::pair<unsigned,unsigned>
EmitKeyDataLength(raw_ostream& Out, PTHEntryKeyVariant V,
const PTHEntry& E) {
using namespace llvm::support;
endian::Writer<little> LE(Out);
unsigned n = V.getString().size() + 1 + 1;
LE.write<uint16_t>(n);
unsigned m = V.getRepresentationLength() + (V.isFile() ? 4 + 4 : 0);
LE.write<uint8_t>(m);
return std::make_pair(n, m);
}
static void EmitKey(raw_ostream& Out, PTHEntryKeyVariant V, unsigned n){
using namespace llvm::support;
// Emit the entry kind.
endian::Writer<little>(Out).write<uint8_t>((unsigned)V.getKind());
// Emit the string.
Out.write(V.getString().data(), n - 1);
}
static void EmitData(raw_ostream& Out, PTHEntryKeyVariant V,
const PTHEntry& E, unsigned) {
using namespace llvm::support;
endian::Writer<little> LE(Out);
// For file entries emit the offsets into the PTH file for token data
// and the preprocessor blocks table.
if (V.isFile()) {
LE.write<uint32_t>(E.getTokenOffset());
LE.write<uint32_t>(E.getPPCondTableOffset());
}
// Emit any other data associated with the key (i.e., stat information).
V.EmitData(Out);
}
};
class OffsetOpt {
bool valid;
Offset off;
public:
OffsetOpt() : valid(false) {}
bool hasOffset() const { return valid; }
Offset getOffset() const { assert(valid); return off; }
void setOffset(Offset o) { off = o; valid = true; }
};
} // end anonymous namespace
typedef llvm::OnDiskChainedHashTableGenerator<FileEntryPTHEntryInfo> PTHMap;
namespace {
class PTHWriter {
typedef llvm::DenseMap<const IdentifierInfo*,uint32_t> IDMap;
typedef llvm::StringMap<OffsetOpt, llvm::BumpPtrAllocator> CachedStrsTy;
IDMap IM;
raw_pwrite_stream &Out;
Preprocessor& PP;
uint32_t idcount;
PTHMap PM;
CachedStrsTy CachedStrs;
Offset CurStrOffset;
std::vector<llvm::StringMapEntry<OffsetOpt>*> StrEntries;
/// Get the persistent id for the given IdentifierInfo*.
uint32_t ResolveID(const IdentifierInfo* II);
/// Emit a token to the PTH file.
void EmitToken(const Token& T);
void Emit8(uint32_t V) {
using namespace llvm::support;
endian::Writer<little>(Out).write<uint8_t>(V);
}
void Emit16(uint32_t V) {
using namespace llvm::support;
endian::Writer<little>(Out).write<uint16_t>(V);
}
void Emit32(uint32_t V) {
using namespace llvm::support;
endian::Writer<little>(Out).write<uint32_t>(V);
}
void EmitBuf(const char *Ptr, unsigned NumBytes) {
Out.write(Ptr, NumBytes);
}
void EmitString(StringRef V) {
using namespace llvm::support;
endian::Writer<little>(Out).write<uint16_t>(V.size());
EmitBuf(V.data(), V.size());
}
/// EmitIdentifierTable - Emits two tables to the PTH file. The first is
/// a hashtable mapping from identifier strings to persistent IDs.
/// The second is a straight table mapping from persistent IDs to string data
/// (the keys of the first table).
std::pair<Offset, Offset> EmitIdentifierTable();
/// EmitFileTable - Emit a table mapping from file name strings to PTH
/// token data.
Offset EmitFileTable() { return PM.Emit(Out); }
PTHEntry LexTokens(Lexer& L);
Offset EmitCachedSpellings();
public:
PTHWriter(raw_pwrite_stream &out, Preprocessor &pp)
: Out(out), PP(pp), idcount(0), CurStrOffset(0) {}
PTHMap &getPM() { return PM; }
void GeneratePTH(const std::string &MainFile);
};
} // end anonymous namespace
uint32_t PTHWriter::ResolveID(const IdentifierInfo* II) {
// Null IdentifierInfo's map to the persistent ID 0.
if (!II)
return 0;
IDMap::iterator I = IM.find(II);
if (I != IM.end())
return I->second; // We've already added 1.
IM[II] = ++idcount; // Pre-increment since '0' is reserved for NULL.
return idcount;
}
void PTHWriter::EmitToken(const Token& T) {
// Emit the token kind, flags, and length.
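// These fields are packed into one little-endian 32-bit word: the kind in
// bits 0-7, the flags in bits 8-15, and the token length in the upper 16
// bits.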
Emit32(((uint32_t) T.getKind()) | ((((uint32_t) T.getFlags())) << 8)|
(((uint32_t) T.getLength()) << 16));
if (!T.isLiteral()) {
Emit32(ResolveID(T.getIdentifierInfo()));
} else {
// We cache *un-cleaned* spellings. This gives us 100% fidelity with the
// source code.
StringRef s(T.getLiteralData(), T.getLength());
// Get the string entry.
auto &E = *CachedStrs.insert(std::make_pair(s, OffsetOpt())).first;
// If this is a new string entry, bump the PTH offset.
if (!E.second.hasOffset()) {
E.second.setOffset(CurStrOffset);
StrEntries.push_back(&E);
CurStrOffset += s.size() + 1;
}
// Emit the relative offset into the PTH file for the spelling string.
Emit32(E.second.getOffset());
}
// Emit the offset into the original source file of this token so that we
// can reconstruct its SourceLocation.
Emit32(PP.getSourceManager().getFileOffset(T.getLocation()));
}
PTHEntry PTHWriter::LexTokens(Lexer& L) {
// Pad with 0's so that we emit tokens at a 4-byte alignment.
// This speeds up reading them back in.
using namespace llvm::support;
endian::Writer<little> LE(Out);
uint32_t TokenOff = Out.tell();
for (uint64_t N = llvm::OffsetToAlignment(TokenOff, 4); N; --N, ++TokenOff)
LE.write<uint8_t>(0);
// Keep track of matching '#if' ... '#endif'.
typedef std::vector<std::pair<Offset, unsigned> > PPCondTable;
PPCondTable PPCond;
std::vector<unsigned> PPStartCond;
bool ParsingPreprocessorDirective = false;
Token Tok;
do {
L.LexFromRawLexer(Tok);
NextToken:
if ((Tok.isAtStartOfLine() || Tok.is(tok::eof)) &&
ParsingPreprocessorDirective) {
// Insert an eod token into the token cache. It has the same
// position as the next token that is not on the same line as the
// preprocessor directive. Observe that we continue processing
// 'Tok' when we exit this branch.
Token Tmp = Tok;
Tmp.setKind(tok::eod);
Tmp.clearFlag(Token::StartOfLine);
Tmp.setIdentifierInfo(nullptr);
EmitToken(Tmp);
ParsingPreprocessorDirective = false;
}
if (Tok.is(tok::raw_identifier)) {
PP.LookUpIdentifierInfo(Tok);
EmitToken(Tok);
continue;
}
if (Tok.is(tok::hash) && Tok.isAtStartOfLine()) {
// Special processing for #include. Store the '#' token and lex
// the next token.
assert(!ParsingPreprocessorDirective);
Offset HashOff = (Offset) Out.tell();
// Get the next token.
Token NextTok;
L.LexFromRawLexer(NextTok);
// If we see the start of line, then we had a null directive "#". In
// this case, discard both tokens.
if (NextTok.isAtStartOfLine())
goto NextToken;
// The token is the start of a directive. Emit it.
EmitToken(Tok);
Tok = NextTok;
// Did we see 'include'/'import'/'include_next'?
if (Tok.isNot(tok::raw_identifier)) {
EmitToken(Tok);
continue;
}
IdentifierInfo* II = PP.LookUpIdentifierInfo(Tok);
tok::PPKeywordKind K = II->getPPKeywordID();
ParsingPreprocessorDirective = true;
switch (K) {
case tok::pp_not_keyword:
// Invalid directives "#foo" can occur in #if 0 blocks etc, just pass
// them through.
default:
break;
case tok::pp_include:
case tok::pp_import:
case tok::pp_include_next: {
// Save the 'include' token.
EmitToken(Tok);
// Lex the next token as an include string.
L.setParsingPreprocessorDirective(true);
L.LexIncludeFilename(Tok);
L.setParsingPreprocessorDirective(false);
assert(!Tok.isAtStartOfLine());
if (Tok.is(tok::raw_identifier))
PP.LookUpIdentifierInfo(Tok);
break;
}
case tok::pp_if:
case tok::pp_ifdef:
case tok::pp_ifndef: {
// Add an entry for '#if' and friends. We initially set the target
// index to 0. This will get backpatched when we hit #endif.
PPStartCond.push_back(PPCond.size());
PPCond.push_back(std::make_pair(HashOff, 0U));
break;
}
case tok::pp_endif: {
// Add an entry for '#endif'. We set the target table index to itself.
// This will later be set to zero when emitting to the PTH file. We
// use 0 for uninitialized indices because that is easier to debug.
unsigned index = PPCond.size();
// Backpatch the opening '#if' entry.
assert(!PPStartCond.empty());
assert(PPCond.size() > PPStartCond.back());
assert(PPCond[PPStartCond.back()].second == 0);
PPCond[PPStartCond.back()].second = index;
PPStartCond.pop_back();
// Add the new entry to PPCond.
PPCond.push_back(std::make_pair(HashOff, index));
EmitToken(Tok);
// Some files have gibberish on the same line as '#endif'.
// Discard these tokens.
do
L.LexFromRawLexer(Tok);
while (Tok.isNot(tok::eof) && !Tok.isAtStartOfLine());
// We have the next token in hand.
// Don't immediately lex the next one.
goto NextToken;
}
case tok::pp_elif:
case tok::pp_else: {
// Add an entry for #elif or #else.
// This serves as both a closing and opening of a conditional block.
// This means that its entry will get backpatched later.
unsigned index = PPCond.size();
// Backpatch the previous '#if' entry.
assert(!PPStartCond.empty());
assert(PPCond.size() > PPStartCond.back());
assert(PPCond[PPStartCond.back()].second == 0);
PPCond[PPStartCond.back()].second = index;
PPStartCond.pop_back();
// Now add '#elif' as a new block opening.
PPCond.push_back(std::make_pair(HashOff, 0U));
PPStartCond.push_back(index);
break;
}
}
}
EmitToken(Tok);
}
while (Tok.isNot(tok::eof));
assert(PPStartCond.empty() && "Error: imbalanced preprocessor conditionals.");
// Next write out PPCond.
Offset PPCondOff = (Offset) Out.tell();
// Write out the size of PPCond so that clients can identify empty tables.
Emit32(PPCond.size());
for (unsigned i = 0, e = PPCond.size(); i!=e; ++i) {
Emit32(PPCond[i].first - TokenOff);
uint32_t x = PPCond[i].second;
assert(x != 0 && "PPCond entry not backpatched.");
// Emit zero for #endifs. This allows us to do checking when
// we read the PTH file back in.
Emit32(x == i ? 0 : x);
}
return PTHEntry(TokenOff, PPCondOff);
}
Offset PTHWriter::EmitCachedSpellings() {
// Write each cached string to the PTH file.
Offset SpellingsOff = Out.tell();
for (std::vector<llvm::StringMapEntry<OffsetOpt>*>::iterator
I = StrEntries.begin(), E = StrEntries.end(); I!=E; ++I)
EmitBuf((*I)->getKeyData(), (*I)->getKeyLength()+1 /*nul included*/);
return SpellingsOff;
}
static uint32_t swap32le(uint32_t X) {
return llvm::support::endian::byte_swap<uint32_t, llvm::support::little>(X);
}
static void pwrite32le(raw_pwrite_stream &OS, uint32_t Val, uint64_t &Off) {
uint32_t LEVal = swap32le(Val);
OS.pwrite(reinterpret_cast<const char *>(&LEVal), 4, Off);
Off += 4;
}
void PTHWriter::GeneratePTH(const std::string &MainFile) {
// Generate the prologue.
Out << "cfe-pth" << '\0';
Emit32(PTHManager::Version);
// Leave 4 words for the prologue.
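// These four words are backpatched at the end of GeneratePTH (see the
// pwrite32le calls below) with the offsets of the two identifier tables,
// the file table, and the cached-spellings block.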
Offset PrologueOffset = Out.tell();
for (unsigned i = 0; i < 4; ++i)
Emit32(0);
// Write the name of the MainFile.
if (!MainFile.empty()) {
EmitString(MainFile);
} else {
// String with 0 bytes.
Emit16(0);
}
Emit8(0);
// Iterate over all the files in SourceManager. Create a lexer
// for each file and cache the tokens.
SourceManager &SM = PP.getSourceManager();
const LangOptions &LOpts = PP.getLangOpts();
for (SourceManager::fileinfo_iterator I = SM.fileinfo_begin(),
E = SM.fileinfo_end(); I != E; ++I) {
const SrcMgr::ContentCache &C = *I->second;
const FileEntry *FE = C.OrigEntry;
// FIXME: Handle files with non-absolute paths.
if (llvm::sys::path::is_relative(FE->getName()))
continue;
const llvm::MemoryBuffer *B = C.getBuffer(PP.getDiagnostics(), SM);
if (!B) continue;
FileID FID = SM.createFileID(FE, SourceLocation(), SrcMgr::C_User);
const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
Lexer L(FID, FromFile, SM, LOpts);
PM.insert(FE, LexTokens(L));
}
// Write out the identifier table.
const std::pair<Offset,Offset> &IdTableOff = EmitIdentifierTable();
// Write out the cached strings table.
Offset SpellingOff = EmitCachedSpellings();
// Write out the file table.
Offset FileTableOff = EmitFileTable();
// Finally, write the prologue.
uint64_t Off = PrologueOffset;
pwrite32le(Out, IdTableOff.first, Off);
pwrite32le(Out, IdTableOff.second, Off);
pwrite32le(Out, FileTableOff, Off);
pwrite32le(Out, SpellingOff, Off);
}
namespace {
/// StatListener - A simple "interpose" object used to monitor stat calls
/// invoked by FileManager while processing the original sources used
/// as input to PTH generation. StatListener populates the PTHWriter's
/// file map with stat information for directories as well as negative stats.
/// Stat information for files is populated elsewhere.
class StatListener : public FileSystemStatCache {
PTHMap &PM;
public:
StatListener(PTHMap &pm) : PM(pm) {}
~StatListener() override {}
LookupResult getStat(const char *Path, FileData &Data, bool isFile,
std::unique_ptr<vfs::File> *F,
vfs::FileSystem &FS) override {
LookupResult Result = statChained(Path, Data, isFile, F, FS);
if (Result == CacheMissing) // Failed 'stat'.
PM.insert(PTHEntryKeyVariant(Path), PTHEntry());
else if (Data.IsDirectory) {
// Only cache directories with absolute paths.
if (llvm::sys::path::is_relative(Path))
return Result;
PM.insert(PTHEntryKeyVariant(&Data, Path), PTHEntry());
}
return Result;
}
};
} // end anonymous namespace
void clang::CacheTokens(Preprocessor &PP, raw_pwrite_stream *OS) {
// Get the name of the main file.
const SourceManager &SrcMgr = PP.getSourceManager();
const FileEntry *MainFile = SrcMgr.getFileEntryForID(SrcMgr.getMainFileID());
SmallString<128> MainFilePath(MainFile->getName());
llvm::sys::fs::make_absolute(MainFilePath);
// Create the PTHWriter.
PTHWriter PW(*OS, PP);
// Install the 'stat' system call listener in the FileManager.
auto StatCacheOwner = llvm::make_unique<StatListener>(PW.getPM());
StatListener *StatCache = StatCacheOwner.get();
PP.getFileManager().addStatCache(std::move(StatCacheOwner),
/*AtBeginning=*/true);
// Lex through the entire file. This will populate SourceManager with
// all of the header information.
Token Tok;
PP.EnterMainSourceFile();
do { PP.Lex(Tok); } while (Tok.isNot(tok::eof));
// Generate the PTH file.
PP.getFileManager().removeStatCache(StatCache);
PW.GeneratePTH(MainFilePath.str());
}
//===----------------------------------------------------------------------===//
namespace {
class PTHIdKey {
public:
const IdentifierInfo* II;
uint32_t FileOffset;
};
class PTHIdentifierTableTrait {
public:
typedef PTHIdKey* key_type;
typedef key_type key_type_ref;
typedef uint32_t data_type;
typedef data_type data_type_ref;
typedef unsigned hash_value_type;
typedef unsigned offset_type;
static hash_value_type ComputeHash(PTHIdKey* key) {
return llvm::HashString(key->II->getName());
}
static std::pair<unsigned,unsigned>
EmitKeyDataLength(raw_ostream& Out, const PTHIdKey* key, uint32_t) {
using namespace llvm::support;
unsigned n = key->II->getLength() + 1;
endian::Writer<little>(Out).write<uint16_t>(n);
return std::make_pair(n, sizeof(uint32_t));
}
static void EmitKey(raw_ostream& Out, PTHIdKey* key, unsigned n) {
// Record the location of the key data. This is used when generating
// the mapping from persistent IDs to strings.
key->FileOffset = Out.tell();
Out.write(key->II->getNameStart(), n);
}
static void EmitData(raw_ostream& Out, PTHIdKey*, uint32_t pID,
unsigned) {
using namespace llvm::support;
endian::Writer<little>(Out).write<uint32_t>(pID);
}
};
} // end anonymous namespace
/// EmitIdentifierTable - Emits two tables to the PTH file. The first is
/// a hashtable mapping from identifier strings to persistent IDs. The second
/// is a straight table mapping from persistent IDs to string data (the
/// keys of the first table).
///
std::pair<Offset,Offset> PTHWriter::EmitIdentifierTable() {
// Build two maps:
// (1) an inverse map from persistent IDs -> (IdentifierInfo*,Offset)
// (2) a map from (IdentifierInfo*, Offset)* -> persistent IDs
// Note that we use 'calloc', so all the bytes are 0.
PTHIdKey *IIDMap = (PTHIdKey*)calloc(idcount, sizeof(PTHIdKey));
if (IIDMap == nullptr) throw std::bad_alloc(); // HLSL Change
// Create the hashtable.
llvm::OnDiskChainedHashTableGenerator<PTHIdentifierTableTrait> IIOffMap;
// Generate mapping from persistent IDs -> IdentifierInfo*.
for (IDMap::iterator I = IM.begin(), E = IM.end(); I != E; ++I) {
// Decrement by 1 because we are using a vector for the lookup and
// 0 is reserved for NULL.
assert(I->second > 0);
assert(I->second-1 < idcount);
unsigned idx = I->second-1;
// Store the mapping from persistent ID to IdentifierInfo*
IIDMap[idx].II = I->first;
// Store the reverse mapping in a hashtable.
IIOffMap.insert(&IIDMap[idx], I->second);
}
// Write out the inverse map first. This causes the PTHIdKey entries to
// record PTH file offsets for the string data. This is used to write
// the second table.
Offset StringTableOffset = IIOffMap.Emit(Out);
// Now emit the table mapping from persistent IDs to PTH file offsets.
Offset IDOff = Out.tell();
Emit32(idcount); // Emit the number of identifiers.
for (unsigned i = 0 ; i < idcount; ++i)
Emit32(IIDMap[i].FileOffset);
// Finally, release the inverse map.
free(IIDMap);
return std::make_pair(IDOff, StringTableOffset);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Frontend/ChainedIncludesSource.cpp | //===- ChainedIncludesSource.cpp - Chained PCHs in Memory -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ChainedIncludesSource class, which converts headers
// to chained PCHs in memory, mainly used for testing.
//
//===----------------------------------------------------------------------===//
#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/ASTUnit.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Parse/ParseAST.h"
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/ASTWriter.h"
#include "llvm/Support/MemoryBuffer.h"
using namespace clang;
namespace {
class ChainedIncludesSource : public ExternalSemaSource {
public:
~ChainedIncludesSource() override;
ExternalSemaSource &getFinalReader() const { return *FinalReader; }
std::vector<CompilerInstance *> CIs;
IntrusiveRefCntPtr<ExternalSemaSource> FinalReader;
protected:
//===----------------------------------------------------------------------===//
// ExternalASTSource interface.
//===----------------------------------------------------------------------===//
Decl *GetExternalDecl(uint32_t ID) override;
Selector GetExternalSelector(uint32_t ID) override;
uint32_t GetNumExternalSelectors() override;
Stmt *GetExternalDeclStmt(uint64_t Offset) override;
CXXCtorInitializer **GetExternalCXXCtorInitializers(uint64_t Offset) override;
CXXBaseSpecifier *GetExternalCXXBaseSpecifiers(uint64_t Offset) override;
bool FindExternalVisibleDeclsByName(const DeclContext *DC,
DeclarationName Name) override;
ExternalLoadResult
FindExternalLexicalDecls(const DeclContext *DC,
bool (*isKindWeWant)(Decl::Kind),
SmallVectorImpl<Decl *> &Result) override;
void CompleteType(TagDecl *Tag) override;
void CompleteType(ObjCInterfaceDecl *Class) override;
void StartedDeserializing() override;
void FinishedDeserializing() override;
void StartTranslationUnit(ASTConsumer *Consumer) override;
void PrintStats() override;
/// Return the amount of memory used by memory buffers, breaking down
/// by heap-backed versus mmap'ed memory.
void getMemoryBufferSizes(MemoryBufferSizes &sizes) const override;
//===----------------------------------------------------------------------===//
// ExternalSemaSource interface.
//===----------------------------------------------------------------------===//
void InitializeSema(Sema &S) override;
void ForgetSema() override;
void ReadMethodPool(Selector Sel) override;
bool LookupUnqualified(LookupResult &R, Scope *S) override;
};
}
static ASTReader *
createASTReader(CompilerInstance &CI, StringRef pchFile,
SmallVectorImpl<std::unique_ptr<llvm::MemoryBuffer>> &MemBufs,
SmallVectorImpl<std::string> &bufNames,
ASTDeserializationListener *deserialListener = nullptr) {
Preprocessor &PP = CI.getPreprocessor();
std::unique_ptr<ASTReader> Reader;
Reader.reset(new ASTReader(PP, CI.getASTContext(),
CI.getPCHContainerReader(),
/*isysroot=*/"", /*DisableValidation=*/true));
for (unsigned ti = 0; ti < bufNames.size(); ++ti) {
StringRef sr(bufNames[ti]);
Reader->addInMemoryBuffer(sr, std::move(MemBufs[ti]));
}
Reader->setDeserializationListener(deserialListener);
switch (Reader->ReadAST(pchFile, serialization::MK_PCH, SourceLocation(),
ASTReader::ARR_None)) {
case ASTReader::Success:
// Set the predefines buffer as suggested by the PCH reader.
PP.setPredefines(Reader->getSuggestedPredefines());
return Reader.release();
case ASTReader::Failure:
case ASTReader::Missing:
case ASTReader::OutOfDate:
case ASTReader::VersionMismatch:
case ASTReader::ConfigurationMismatch:
case ASTReader::HadErrors:
break;
}
return nullptr;
}
ChainedIncludesSource::~ChainedIncludesSource() {
for (unsigned i = 0, e = CIs.size(); i != e; ++i)
delete CIs[i];
}
IntrusiveRefCntPtr<ExternalSemaSource> clang::createChainedIncludesSource(
CompilerInstance &CI, IntrusiveRefCntPtr<ExternalSemaSource> &Reader) {
std::vector<std::string> &includes = CI.getPreprocessorOpts().ChainedIncludes;
assert(!includes.empty() && "No '-chain-include' in options!");
IntrusiveRefCntPtr<ChainedIncludesSource> source(new ChainedIncludesSource());
InputKind IK = CI.getFrontendOpts().Inputs[0].getKind();
SmallVector<std::unique_ptr<llvm::MemoryBuffer>, 4> SerialBufs;
SmallVector<std::string, 4> serialBufNames;
for (unsigned i = 0, e = includes.size(); i != e; ++i) {
bool firstInclude = (i == 0);
std::unique_ptr<CompilerInvocation> CInvok;
CInvok.reset(new CompilerInvocation(CI.getInvocation()));
CInvok->getPreprocessorOpts().ChainedIncludes.clear();
CInvok->getPreprocessorOpts().ImplicitPCHInclude.clear();
CInvok->getPreprocessorOpts().ImplicitPTHInclude.clear();
CInvok->getPreprocessorOpts().DisablePCHValidation = true;
CInvok->getPreprocessorOpts().Includes.clear();
CInvok->getPreprocessorOpts().MacroIncludes.clear();
CInvok->getPreprocessorOpts().Macros.clear();
CInvok->getFrontendOpts().Inputs.clear();
FrontendInputFile InputFile(includes[i], IK);
CInvok->getFrontendOpts().Inputs.push_back(InputFile);
TextDiagnosticPrinter *DiagClient =
new TextDiagnosticPrinter(llvm::errs(), new DiagnosticOptions());
IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
new DiagnosticsEngine(DiagID, &CI.getDiagnosticOpts(), DiagClient));
std::unique_ptr<CompilerInstance> Clang(
new CompilerInstance(CI.getPCHContainerOperations()));
Clang->setInvocation(CInvok.release());
Clang->setDiagnostics(Diags.get());
Clang->setTarget(TargetInfo::CreateTargetInfo(
Clang->getDiagnostics(), Clang->getInvocation().TargetOpts));
Clang->createFileManager();
Clang->createSourceManager(Clang->getFileManager());
Clang->createPreprocessor(TU_Prefix);
Clang->getDiagnosticClient().BeginSourceFile(Clang->getLangOpts(),
&Clang->getPreprocessor());
Clang->createASTContext();
auto Buffer = std::make_shared<PCHBuffer>();
auto consumer = llvm::make_unique<PCHGenerator>(
Clang->getPreprocessor(), "-", nullptr, /*isysroot=*/"", Buffer);
Clang->getASTContext().setASTMutationListener(
consumer->GetASTMutationListener());
Clang->setASTConsumer(std::move(consumer));
Clang->createSema(TU_Prefix, nullptr);
if (firstInclude) {
Preprocessor &PP = Clang->getPreprocessor();
PP.getBuiltinInfo().InitializeBuiltins(PP.getIdentifierTable(),
PP.getLangOpts());
} else {
assert(!SerialBufs.empty());
SmallVector<std::unique_ptr<llvm::MemoryBuffer>, 4> Bufs;
// TODO: Pass through the existing MemoryBuffer instances instead of
// allocating new ones.
for (auto &SB : SerialBufs)
Bufs.push_back(llvm::MemoryBuffer::getMemBuffer(SB->getBuffer()));
std::string pchName = includes[i-1];
llvm::raw_string_ostream os(pchName);
os << ".pch" << i-1;
serialBufNames.push_back(os.str());
IntrusiveRefCntPtr<ASTReader> Reader;
Reader = createASTReader(
*Clang, pchName, Bufs, serialBufNames,
Clang->getASTConsumer().GetASTDeserializationListener());
if (!Reader)
return nullptr;
Clang->setModuleManager(Reader);
Clang->getASTContext().setExternalSource(Reader);
}
if (!Clang->InitializeSourceManager(InputFile))
return nullptr;
ParseAST(Clang->getSema());
Clang->getDiagnosticClient().EndSourceFile();
assert(Buffer->IsComplete && "serialization did not complete");
auto &serialAST = Buffer->Data;
SerialBufs.push_back(llvm::MemoryBuffer::getMemBufferCopy(
StringRef(serialAST.data(), serialAST.size())));
serialAST.clear();
source->CIs.push_back(Clang.release());
}
assert(!SerialBufs.empty());
std::string pchName = includes.back() + ".pch-final";
serialBufNames.push_back(pchName);
Reader = createASTReader(CI, pchName, SerialBufs, serialBufNames);
if (!Reader)
return nullptr;
source->FinalReader = Reader;
return source;
}
//===----------------------------------------------------------------------===//
// ExternalASTSource interface.
//===----------------------------------------------------------------------===//
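// Each callback below simply forwards to the reader for the last PCH in the
// chain, i.e. getFinalReader().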
Decl *ChainedIncludesSource::GetExternalDecl(uint32_t ID) {
return getFinalReader().GetExternalDecl(ID);
}
Selector ChainedIncludesSource::GetExternalSelector(uint32_t ID) {
return getFinalReader().GetExternalSelector(ID);
}
uint32_t ChainedIncludesSource::GetNumExternalSelectors() {
return getFinalReader().GetNumExternalSelectors();
}
Stmt *ChainedIncludesSource::GetExternalDeclStmt(uint64_t Offset) {
return getFinalReader().GetExternalDeclStmt(Offset);
}
CXXBaseSpecifier *
ChainedIncludesSource::GetExternalCXXBaseSpecifiers(uint64_t Offset) {
return getFinalReader().GetExternalCXXBaseSpecifiers(Offset);
}
CXXCtorInitializer **
ChainedIncludesSource::GetExternalCXXCtorInitializers(uint64_t Offset) {
return getFinalReader().GetExternalCXXCtorInitializers(Offset);
}
bool
ChainedIncludesSource::FindExternalVisibleDeclsByName(const DeclContext *DC,
DeclarationName Name) {
return getFinalReader().FindExternalVisibleDeclsByName(DC, Name);
}
ExternalLoadResult
ChainedIncludesSource::FindExternalLexicalDecls(const DeclContext *DC,
bool (*isKindWeWant)(Decl::Kind),
SmallVectorImpl<Decl*> &Result) {
return getFinalReader().FindExternalLexicalDecls(DC, isKindWeWant, Result);
}
void ChainedIncludesSource::CompleteType(TagDecl *Tag) {
return getFinalReader().CompleteType(Tag);
}
void ChainedIncludesSource::CompleteType(ObjCInterfaceDecl *Class) {
return getFinalReader().CompleteType(Class);
}
void ChainedIncludesSource::StartedDeserializing() {
return getFinalReader().StartedDeserializing();
}
void ChainedIncludesSource::FinishedDeserializing() {
return getFinalReader().FinishedDeserializing();
}
void ChainedIncludesSource::StartTranslationUnit(ASTConsumer *Consumer) {
return getFinalReader().StartTranslationUnit(Consumer);
}
void ChainedIncludesSource::PrintStats() {
return getFinalReader().PrintStats();
}
void ChainedIncludesSource::getMemoryBufferSizes(
    MemoryBufferSizes &sizes) const {
for (unsigned i = 0, e = CIs.size(); i != e; ++i) {
if (const ExternalASTSource *eSrc =
CIs[i]->getASTContext().getExternalSource()) {
eSrc->getMemoryBufferSizes(sizes);
}
}
getFinalReader().getMemoryBufferSizes(sizes);
}
void ChainedIncludesSource::InitializeSema(Sema &S) {
return getFinalReader().InitializeSema(S);
}
void ChainedIncludesSource::ForgetSema() {
return getFinalReader().ForgetSema();
}
void ChainedIncludesSource::ReadMethodPool(Selector Sel) {
getFinalReader().ReadMethodPool(Sel);
}
bool ChainedIncludesSource::LookupUnqualified(LookupResult &R, Scope *S) {
return getFinalReader().LookupUnqualified(R, S);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Frontend/ASTMerge.cpp | //===-- ASTMerge.cpp - AST Merging Frontend Action --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/Frontend/ASTUnit.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/ASTImporter.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendActions.h"
using namespace clang;
std::unique_ptr<ASTConsumer>
ASTMergeAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
return AdaptedAction->CreateASTConsumer(CI, InFile);
}
bool ASTMergeAction::BeginSourceFileAction(CompilerInstance &CI,
StringRef Filename) {
// FIXME: This is a hack. We need a better way to communicate the
// AST file, compiler instance, and file name than member variables
// of FrontendAction.
AdaptedAction->setCurrentInput(getCurrentInput(), takeCurrentASTUnit());
AdaptedAction->setCompilerInstance(&CI);
return AdaptedAction->BeginSourceFileAction(CI, Filename);
}
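/// Load each AST file named on the command line and import its top-level
/// declarations into the current translation unit before running the
/// adapted action.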
void ASTMergeAction::ExecuteAction() {
CompilerInstance &CI = getCompilerInstance();
CI.getDiagnostics().getClient()->BeginSourceFile(
CI.getASTContext().getLangOpts());
CI.getDiagnostics().SetArgToStringFn(&FormatASTNodeDiagnosticArgument,
&CI.getASTContext());
IntrusiveRefCntPtr<DiagnosticIDs>
DiagIDs(CI.getDiagnostics().getDiagnosticIDs());
for (unsigned I = 0, N = ASTFiles.size(); I != N; ++I) {
IntrusiveRefCntPtr<DiagnosticsEngine>
Diags(new DiagnosticsEngine(DiagIDs, &CI.getDiagnosticOpts(),
new ForwardingDiagnosticConsumer(
*CI.getDiagnostics().getClient()),
/*ShouldOwnClient=*/true));
std::unique_ptr<ASTUnit> Unit =
ASTUnit::LoadFromASTFile(ASTFiles[I], CI.getPCHContainerReader(),
Diags, CI.getFileSystemOpts(), false);
if (!Unit)
continue;
ASTImporter Importer(CI.getASTContext(),
CI.getFileManager(),
Unit->getASTContext(),
Unit->getFileManager(),
/*MinimalImport=*/false);
TranslationUnitDecl *TU = Unit->getASTContext().getTranslationUnitDecl();
CI.getASTConsumer().Initialize(CI.getASTContext());
for (auto *D : TU->decls()) {
// Don't re-import __va_list_tag, __builtin_va_list.
if (const auto *ND = dyn_cast<NamedDecl>(D))
if (IdentifierInfo *II = ND->getIdentifier())
if (II->isStr("__va_list_tag") || II->isStr("__builtin_va_list"))
continue;
Decl *ToD = Importer.Import(D);
if (ToD) {
DeclGroupRef DGR(ToD);
CI.getASTConsumer().HandleTopLevelDecl(DGR);
}
}
}
AdaptedAction->ExecuteAction();
CI.getDiagnostics().getClient()->EndSourceFile();
}
void ASTMergeAction::EndSourceFileAction() {
return AdaptedAction->EndSourceFileAction();
}
ASTMergeAction::ASTMergeAction(FrontendAction *AdaptedAction,
ArrayRef<std::string> ASTFiles)
: AdaptedAction(AdaptedAction), ASTFiles(ASTFiles.begin(), ASTFiles.end()) {
assert(AdaptedAction && "ASTMergeAction needs an action to adapt");
}
ASTMergeAction::~ASTMergeAction() {
delete AdaptedAction;
}
bool ASTMergeAction::usesPreprocessorOnly() const {
return AdaptedAction->usesPreprocessorOnly();
}
TranslationUnitKind ASTMergeAction::getTranslationUnitKind() {
return AdaptedAction->getTranslationUnitKind();
}
bool ASTMergeAction::hasPCHSupport() const {
return AdaptedAction->hasPCHSupport();
}
bool ASTMergeAction::hasASTFileSupport() const {
return AdaptedAction->hasASTFileSupport();
}
bool ASTMergeAction::hasCodeCompletionSupport() const {
return AdaptedAction->hasCodeCompletionSupport();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp | //===--- TextDiagnosticPrinter.cpp - Diagnostic Printer -------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This diagnostic client prints out the diagnostics it is given.
//
//===----------------------------------------------------------------------===//
#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Frontend/TextDiagnostic.h"
#include "clang/Lex/Lexer.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace clang;
TextDiagnosticPrinter::TextDiagnosticPrinter(raw_ostream &os,
DiagnosticOptions *diags,
bool _OwnsOutputStream)
: OS(os), DiagOpts(diags),
OwnsOutputStream(_OwnsOutputStream) {
}
TextDiagnosticPrinter::~TextDiagnosticPrinter() {
if (OwnsOutputStream)
delete &OS;
}
void TextDiagnosticPrinter::BeginSourceFile(const LangOptions &LO,
const Preprocessor *PP) {
// Build the TextDiagnostic utility.
TextDiag.reset(new TextDiagnostic(OS, LO, &*DiagOpts));
}
void TextDiagnosticPrinter::EndSourceFile() {
TextDiag.reset();
}
/// \brief Print any diagnostic option information to a raw_ostream.
///
/// This implements all of the logic for adding diagnostic options to a message
/// (via OS). Each relevant option is comma separated and all are enclosed in
/// the standard bracketing: " [...]".
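///
/// For example, a warning promoted to an error by -Werror might be rendered
/// with the suffix " [-Werror,-Wunused-variable]".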
static void printDiagnosticOptions(raw_ostream &OS,
DiagnosticsEngine::Level Level,
const Diagnostic &Info,
const DiagnosticOptions &DiagOpts) {
bool Started = false;
if (DiagOpts.ShowOptionNames) {
// Handle special cases for non-warnings early.
if (Info.getID() == diag::fatal_too_many_errors) {
OS << " [-ferror-limit=]";
return;
}
// The code below is somewhat fragile because we are essentially trying to
// report to the user what happened by inferring what the diagnostic engine
// did. Eventually it might make more sense to have the diagnostic engine
// include some "why" information in the diagnostic.
// If this is a warning which has been mapped to an error by the user (as
// inferred by checking whether the default mapping is to an error) then
// flag it as such. Note that diagnostics could also have been mapped by a
// pragma, but we don't currently have a way to distinguish this.
if (Level == DiagnosticsEngine::Error &&
DiagnosticIDs::isBuiltinWarningOrExtension(Info.getID()) &&
!DiagnosticIDs::isDefaultMappingAsError(Info.getID())) {
OS << " [-Werror";
Started = true;
}
StringRef Opt = DiagnosticIDs::getWarningOptionForDiag(Info.getID());
if (!Opt.empty()) {
OS << (Started ? "," : " [")
<< (Level == DiagnosticsEngine::Remark ? "-R" : "-W") << Opt;
StringRef OptValue = Info.getDiags()->getFlagValue();
if (!OptValue.empty())
OS << "=" << OptValue;
Started = true;
}
}
// If the user wants to see category information, include it too.
if (DiagOpts.ShowCategories) {
unsigned DiagCategory =
DiagnosticIDs::getCategoryNumberForDiag(Info.getID());
if (DiagCategory) {
OS << (Started ? "," : " [");
Started = true;
if (DiagOpts.ShowCategories == 1)
OS << DiagCategory;
else {
assert(DiagOpts.ShowCategories == 2 && "Invalid ShowCategories value");
OS << DiagnosticIDs::getCategoryNameFromID(DiagCategory);
}
}
}
if (Started)
OS << ']';
}
void TextDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
const Diagnostic &Info) {
// Default implementation (Warnings/errors count).
DiagnosticConsumer::HandleDiagnostic(Level, Info);
// Render the diagnostic message into a temporary buffer eagerly. We'll use
// this later as we print out the diagnostic to the terminal.
SmallString<100> OutStr;
Info.FormatDiagnostic(OutStr);
llvm::raw_svector_ostream DiagMessageStream(OutStr);
printDiagnosticOptions(DiagMessageStream, Level, Info, *DiagOpts);
// Keeps track of the starting position of the location
// information (e.g., "foo.c:10:4:") that precedes the error
// message. We use this information to determine how long the
// file+line+column number prefix is.
uint64_t StartOfLocationInfo = OS.tell();
if (!Prefix.empty())
OS << Prefix << ": ";
// Use a dedicated, simpler path for diagnostics without a valid location.
// This is important as if the location is missing, we may be emitting
// diagnostics in a context that lacks language options, a source manager, or
// other infrastructure necessary when emitting more rich diagnostics.
if (!Info.getLocation().isValid()) {
TextDiagnostic::printDiagnosticLevel(OS, Level, DiagOpts->ShowColors,
DiagOpts->CLFallbackMode);
TextDiagnostic::printDiagnosticMessage(OS, Level, DiagMessageStream.str(),
OS.tell() - StartOfLocationInfo,
DiagOpts->MessageLength,
DiagOpts->ShowColors);
OS.flush();
return;
}
// Assert that the rest of our infrastructure is setup properly.
assert(DiagOpts && "Unexpected diagnostic without options set");
assert(Info.hasSourceManager() &&
"Unexpected diagnostic with no source manager");
assert(TextDiag && "Unexpected diagnostic outside source file processing");
TextDiag->emitDiagnostic(Info.getLocation(), Level, DiagMessageStream.str(),
Info.getRanges(),
Info.getFixItHints(),
&Info.getSourceManager());
OS.flush();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Frontend/ASTConsumers.cpp | //===--- ASTConsumers.cpp - ASTConsumer implementations -------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// AST Consumer Implementations.
//
//===----------------------------------------------------------------------===//
#include "clang/Frontend/ASTConsumers.h"
#include "clang/AST/AST.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
//===----------------------------------------------------------------------===//
/// ASTPrinter - Pretty-printer and dumper of ASTs
namespace {
class ASTPrinter : public ASTConsumer,
public RecursiveASTVisitor<ASTPrinter> {
typedef RecursiveASTVisitor<ASTPrinter> base;
public:
ASTPrinter(raw_ostream *Out = nullptr, bool Dump = false,
StringRef FilterString = "", bool DumpLookups = false)
: Out(Out ? *Out : llvm::outs()), Dump(Dump),
FilterString(FilterString), DumpLookups(DumpLookups) {}
void HandleTranslationUnit(ASTContext &Context) override {
TranslationUnitDecl *D = Context.getTranslationUnitDecl();
if (FilterString.empty())
return print(D);
TraverseDecl(D);
}
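  // Don't walk the types inside TypeLocs; only declarations are of interest
  // for name-based filtering.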
bool shouldWalkTypesOfTypeLocs() const { return false; }
bool TraverseDecl(Decl *D) {
if (D && filterMatches(D)) {
bool ShowColors = Out.has_colors();
if (ShowColors)
Out.changeColor(raw_ostream::BLUE);
Out << ((Dump || DumpLookups) ? "Dumping " : "Printing ") << getName(D)
<< ":\n";
if (ShowColors)
Out.resetColor();
print(D);
Out << "\n";
// Don't traverse child nodes to avoid output duplication.
return true;
}
return base::TraverseDecl(D);
}
private:
std::string getName(Decl *D) {
if (isa<NamedDecl>(D))
return cast<NamedDecl>(D)->getQualifiedNameAsString();
return "";
}
bool filterMatches(Decl *D) {
return getName(D).find(FilterString) != std::string::npos;
}
void print(Decl *D) {
if (DumpLookups) {
if (DeclContext *DC = dyn_cast<DeclContext>(D)) {
if (DC == DC->getPrimaryContext())
DC->dumpLookups(Out, Dump);
else
Out << "Lookup map is in primary DeclContext "
<< DC->getPrimaryContext() << "\n";
} else
Out << "Not a DeclContext\n";
} else if (Dump)
D->dump(Out);
else
D->print(Out, /*Indentation=*/0, /*PrintInstantiation=*/true);
}
raw_ostream &Out;
bool Dump;
std::string FilterString;
bool DumpLookups;
};
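/// ASTDeclNodeLister - prints the fully qualified name of every named
/// declaration in the translation unit, one per line.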
class ASTDeclNodeLister : public ASTConsumer,
public RecursiveASTVisitor<ASTDeclNodeLister> {
public:
ASTDeclNodeLister(raw_ostream *Out = nullptr)
: Out(Out ? *Out : llvm::outs()) {}
void HandleTranslationUnit(ASTContext &Context) override {
TraverseDecl(Context.getTranslationUnitDecl());
}
bool shouldWalkTypesOfTypeLocs() const { return false; }
bool VisitNamedDecl(NamedDecl *D) {
D->printQualifiedName(Out);
Out << '\n';
return true;
}
private:
raw_ostream &Out;
};
} // end anonymous namespace
std::unique_ptr<ASTConsumer> clang::CreateASTPrinter(raw_ostream *Out,
StringRef FilterString) {
return llvm::make_unique<ASTPrinter>(Out, /*Dump=*/false, FilterString);
}
std::unique_ptr<ASTConsumer> clang::CreateASTDumper(raw_ostream *Out, // HLSL Change - explicit Out
StringRef FilterString,
bool DumpDecls,
bool DumpLookups) {
assert((DumpDecls || DumpLookups) && "nothing to dump");
return llvm::make_unique<ASTPrinter>(Out, DumpDecls, FilterString, // HLSL Change - explicit Out
DumpLookups);
}
std::unique_ptr<ASTConsumer> clang::CreateASTDeclNodeLister() {
return llvm::make_unique<ASTDeclNodeLister>(nullptr);
}
//===----------------------------------------------------------------------===//
/// ASTViewer - AST Visualization
namespace {
class ASTViewer : public ASTConsumer {
ASTContext *Context;
public:
void Initialize(ASTContext &Context) override {
this->Context = &Context;
}
bool HandleTopLevelDecl(DeclGroupRef D) override {
for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I)
HandleTopLevelSingleDecl(*I);
return true;
}
void HandleTopLevelSingleDecl(Decl *D);
};
}
void ASTViewer::HandleTopLevelSingleDecl(Decl *D) {
if (isa<FunctionDecl>(D) || isa<ObjCMethodDecl>(D)) {
D->print(llvm::errs());
if (Stmt *Body = D->getBody()) {
llvm::errs() << '\n';
Body->viewAST();
llvm::errs() << '\n';
}
}
}
std::unique_ptr<ASTConsumer> clang::CreateASTViewer() {
return llvm::make_unique<ASTViewer>();
}
//===----------------------------------------------------------------------===//
/// DeclContextPrinter - Decl and DeclContext Visualization
namespace {
class DeclContextPrinter : public ASTConsumer {
raw_ostream& Out;
public:
DeclContextPrinter() : Out(llvm::errs()) {}
void HandleTranslationUnit(ASTContext &C) override {
PrintDeclContext(C.getTranslationUnitDecl(), 4);
}
void PrintDeclContext(const DeclContext* DC, unsigned Indentation);
};
} // end anonymous namespace
void DeclContextPrinter::PrintDeclContext(const DeclContext* DC,
unsigned Indentation) {
// Print DeclContext name.
switch (DC->getDeclKind()) {
case Decl::TranslationUnit:
Out << "[translation unit] " << DC;
break;
case Decl::Namespace: {
Out << "[namespace] ";
const NamespaceDecl* ND = cast<NamespaceDecl>(DC);
Out << *ND;
break;
}
case Decl::Enum: {
const EnumDecl* ED = cast<EnumDecl>(DC);
if (ED->isCompleteDefinition())
Out << "[enum] ";
else
Out << "<enum> ";
Out << *ED;
break;
}
case Decl::Record: {
const RecordDecl* RD = cast<RecordDecl>(DC);
if (RD->isCompleteDefinition())
Out << "[struct] ";
else
Out << "<struct> ";
Out << *RD;
break;
}
case Decl::CXXRecord: {
const CXXRecordDecl* RD = cast<CXXRecordDecl>(DC);
if (RD->isCompleteDefinition())
Out << "[class] ";
else
Out << "<class> ";
Out << *RD << ' ' << DC;
break;
}
case Decl::ObjCMethod:
Out << "[objc method]";
break;
case Decl::ObjCInterface:
Out << "[objc interface]";
break;
case Decl::ObjCCategory:
Out << "[objc category]";
break;
case Decl::ObjCProtocol:
Out << "[objc protocol]";
break;
case Decl::ObjCImplementation:
Out << "[objc implementation]";
break;
case Decl::ObjCCategoryImpl:
Out << "[objc categoryimpl]";
break;
case Decl::LinkageSpec:
Out << "[linkage spec]";
break;
case Decl::Block:
Out << "[block]";
break;
case Decl::Function: {
const FunctionDecl* FD = cast<FunctionDecl>(DC);
if (FD->doesThisDeclarationHaveABody())
Out << "[function] ";
else
Out << "<function> ";
Out << *FD;
// Print the parameters.
Out << "(";
bool PrintComma = false;
for (auto I : FD->params()) {
if (PrintComma)
Out << ", ";
else
PrintComma = true;
Out << *I;
}
Out << ")";
break;
}
case Decl::CXXMethod: {
const CXXMethodDecl* D = cast<CXXMethodDecl>(DC);
if (D->isOutOfLine())
Out << "[c++ method] ";
else if (D->isImplicit())
Out << "(c++ method) ";
else
Out << "<c++ method> ";
Out << *D;
// Print the parameters.
Out << "(";
bool PrintComma = false;
for (FunctionDecl::param_const_iterator I = D->param_begin(),
E = D->param_end(); I != E; ++I) {
if (PrintComma)
Out << ", ";
else
PrintComma = true;
Out << **I;
}
Out << ")";
// Check the semantic DeclContext.
const DeclContext* SemaDC = D->getDeclContext();
const DeclContext* LexicalDC = D->getLexicalDeclContext();
if (SemaDC != LexicalDC)
Out << " [[" << SemaDC << "]]";
break;
}
case Decl::CXXConstructor: {
const CXXConstructorDecl* D = cast<CXXConstructorDecl>(DC);
if (D->isOutOfLine())
Out << "[c++ ctor] ";
else if (D->isImplicit())
Out << "(c++ ctor) ";
else
Out << "<c++ ctor> ";
Out << *D;
// Print the parameters.
Out << "(";
bool PrintComma = false;
for (FunctionDecl::param_const_iterator I = D->param_begin(),
E = D->param_end(); I != E; ++I) {
if (PrintComma)
Out << ", ";
else
PrintComma = true;
Out << **I;
}
Out << ")";
// Check the semantic DC.
const DeclContext* SemaDC = D->getDeclContext();
const DeclContext* LexicalDC = D->getLexicalDeclContext();
if (SemaDC != LexicalDC)
Out << " [[" << SemaDC << "]]";
break;
}
case Decl::CXXDestructor: {
const CXXDestructorDecl* D = cast<CXXDestructorDecl>(DC);
if (D->isOutOfLine())
Out << "[c++ dtor] ";
else if (D->isImplicit())
Out << "(c++ dtor) ";
else
Out << "<c++ dtor> ";
Out << *D;
// Check the semantic DC.
const DeclContext* SemaDC = D->getDeclContext();
const DeclContext* LexicalDC = D->getLexicalDeclContext();
if (SemaDC != LexicalDC)
Out << " [[" << SemaDC << "]]";
break;
}
case Decl::CXXConversion: {
const CXXConversionDecl* D = cast<CXXConversionDecl>(DC);
if (D->isOutOfLine())
Out << "[c++ conversion] ";
else if (D->isImplicit())
Out << "(c++ conversion) ";
else
Out << "<c++ conversion> ";
Out << *D;
// Check the semantic DC.
const DeclContext* SemaDC = D->getDeclContext();
const DeclContext* LexicalDC = D->getLexicalDeclContext();
if (SemaDC != LexicalDC)
Out << " [[" << SemaDC << "]]";
break;
}
default:
llvm_unreachable("a decl that inherits DeclContext isn't handled");
}
Out << "\n";
// Print decls in the DeclContext.
for (auto *I : DC->decls()) {
for (unsigned i = 0; i < Indentation; ++i)
Out << " ";
Decl::Kind DK = I->getKind();
switch (DK) {
case Decl::Namespace:
case Decl::Enum:
case Decl::Record:
case Decl::CXXRecord:
case Decl::ObjCMethod:
case Decl::ObjCInterface:
case Decl::ObjCCategory:
case Decl::ObjCProtocol:
case Decl::ObjCImplementation:
case Decl::ObjCCategoryImpl:
case Decl::LinkageSpec:
case Decl::Block:
case Decl::Function:
case Decl::CXXMethod:
case Decl::CXXConstructor:
case Decl::CXXDestructor:
case Decl::CXXConversion:
{
DeclContext* DC = cast<DeclContext>(I);
PrintDeclContext(DC, Indentation+2);
break;
}
case Decl::IndirectField: {
IndirectFieldDecl* IFD = cast<IndirectFieldDecl>(I);
Out << "<IndirectField> " << *IFD << '\n';
break;
}
case Decl::Label: {
LabelDecl *LD = cast<LabelDecl>(I);
Out << "<Label> " << *LD << '\n';
break;
}
case Decl::Field: {
FieldDecl *FD = cast<FieldDecl>(I);
Out << "<field> " << *FD << '\n';
break;
}
case Decl::Typedef:
case Decl::TypeAlias: {
TypedefNameDecl* TD = cast<TypedefNameDecl>(I);
Out << "<typedef> " << *TD << '\n';
break;
}
case Decl::EnumConstant: {
EnumConstantDecl* ECD = cast<EnumConstantDecl>(I);
Out << "<enum constant> " << *ECD << '\n';
break;
}
case Decl::Var: {
VarDecl* VD = cast<VarDecl>(I);
Out << "<var> " << *VD << '\n';
break;
}
case Decl::ImplicitParam: {
ImplicitParamDecl* IPD = cast<ImplicitParamDecl>(I);
Out << "<implicit parameter> " << *IPD << '\n';
break;
}
case Decl::ParmVar: {
ParmVarDecl* PVD = cast<ParmVarDecl>(I);
Out << "<parameter> " << *PVD << '\n';
break;
}
case Decl::ObjCProperty: {
ObjCPropertyDecl* OPD = cast<ObjCPropertyDecl>(I);
Out << "<objc property> " << *OPD << '\n';
break;
}
case Decl::FunctionTemplate: {
FunctionTemplateDecl* FTD = cast<FunctionTemplateDecl>(I);
Out << "<function template> " << *FTD << '\n';
break;
}
case Decl::FileScopeAsm: {
Out << "<file-scope asm>\n";
break;
}
case Decl::UsingDirective: {
Out << "<using directive>\n";
break;
}
case Decl::NamespaceAlias: {
NamespaceAliasDecl* NAD = cast<NamespaceAliasDecl>(I);
Out << "<namespace alias> " << *NAD << '\n';
break;
}
case Decl::ClassTemplate: {
ClassTemplateDecl *CTD = cast<ClassTemplateDecl>(I);
Out << "<class template> " << *CTD << '\n';
break;
}
case Decl::OMPThreadPrivate: {
Out << "<omp threadprivate> " << '"' << I << "\"\n";
break;
}
default:
Out << "DeclKind: " << DK << '"' << I << "\"\n";
llvm_unreachable("decl unhandled");
}
}
}
std::unique_ptr<ASTConsumer> clang::CreateDeclContextPrinter() {
return llvm::make_unique<DeclContextPrinter>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Frontend/ASTUnit.cpp | //===--- ASTUnit.cpp - ASTUnit utility ------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// ASTUnit Implementation.
//
//===----------------------------------------------------------------------===//
#include "clang/Frontend/ASTUnit.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclVisitor.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendActions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/FrontendOptions.h"
#include "clang/Frontend/MultiplexConsumer.h"
#include "clang/Frontend/Utils.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Sema/Sema.h"
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/ASTWriter.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Support/CrashRecoveryContext.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/MutexGuard.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include <atomic>
#include <cstdio>
#include <cstdlib>
#include "clang/Frontend/VerifyDiagnosticConsumer.h" // HLSL Change
#include "llvm/Support/ManagedStatic.h" // HLSL Change
using namespace clang;
using llvm::TimeRecord;
namespace {
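/// \brief Timing helper: when enabled, records the start time of a labeled
/// phase and prints the elapsed time to stderr on destruction.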
class SimpleTimer {
bool WantTiming;
TimeRecord Start;
std::string Output;
public:
explicit SimpleTimer(bool WantTiming) : WantTiming(WantTiming) {
if (WantTiming)
Start = TimeRecord::getCurrentTime();
}
void setOutput(const Twine &Output) {
if (WantTiming)
this->Output = Output.str();
}
~SimpleTimer() {
if (WantTiming) {
TimeRecord Elapsed = TimeRecord::getCurrentTime();
Elapsed -= Start;
llvm::errs() << Output << ':';
Elapsed.print(Elapsed, llvm::errs());
llvm::errs() << '\n';
}
}
};
struct OnDiskData {
/// \brief The file in which the precompiled preamble is stored.
std::string PreambleFile;
/// \brief Temporary files that should be removed when the ASTUnit is
/// destroyed.
SmallVector<std::string, 4> TemporaryFiles;
/// \brief Erase temporary files.
void CleanTemporaryFiles();
/// \brief Erase the preamble file.
void CleanPreambleFile();
/// \brief Erase temporary files and the preamble file.
void Cleanup();
};
}
static llvm::sys::SmartMutex<false> &getOnDiskMutex() {
static llvm::sys::SmartMutex<false> M(/* recursive = */ true);
return M;
}
// HLSL Change: use ManagedStatic
typedef llvm::DenseMap<const ASTUnit *,
std::unique_ptr<OnDiskData>> OnDiskDataMap;
static OnDiskDataMap &getOnDiskDataMap() {
static llvm::ManagedStatic<OnDiskDataMap> M;
return *M;
}
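/// \brief Look up (creating it if necessary) the on-disk data entry for the
/// given ASTUnit.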
static OnDiskData &getOnDiskData(const ASTUnit *AU) {
// We require the mutex since we are modifying the structure of the
// DenseMap.
llvm::MutexGuard Guard(getOnDiskMutex());
OnDiskDataMap &M = getOnDiskDataMap();
auto &D = M[AU];
if (!D)
D = llvm::make_unique<OnDiskData>();
return *D;
}
#if 0 // HLSL Change Starts - no support for PCH
static void erasePreambleFile(const ASTUnit *AU) {
getOnDiskData(AU).CleanPreambleFile();
}
static void removeOnDiskEntry(const ASTUnit *AU) {
// We require the mutex since we are modifying the structure of the
// DenseMap.
llvm::MutexGuard Guard(getOnDiskMutex());
OnDiskDataMap &M = getOnDiskDataMap();
OnDiskDataMap::iterator I = M.find(AU);
if (I != M.end()) {
I->second->Cleanup();
M.erase(I);
}
}
static void setPreambleFile(const ASTUnit *AU, StringRef preambleFile) {
getOnDiskData(AU).PreambleFile = preambleFile;
}
#endif // HLSL Change Ends - no support for PCH
static const std::string &getPreambleFile(const ASTUnit *AU) {
return getOnDiskData(AU).PreambleFile;
}
void OnDiskData::CleanTemporaryFiles() {
for (StringRef File : TemporaryFiles)
llvm::sys::fs::remove(File);
TemporaryFiles.clear();
}
void OnDiskData::CleanPreambleFile() {
if (!PreambleFile.empty()) {
llvm::sys::fs::remove(PreambleFile);
PreambleFile.clear();
}
}
void OnDiskData::Cleanup() {
CleanTemporaryFiles();
CleanPreambleFile();
}
#if 0 // HLSL Change Starts - no support for serialization
struct ASTUnit::ASTWriterData {
SmallString<128> Buffer;
llvm::BitstreamWriter Stream;
ASTWriter Writer;
ASTWriterData() : Stream(Buffer), Writer(Stream) { }
};
#endif // HLSL Change Ends - no support for serialization
void ASTUnit::clearFileLevelDecls() {
llvm::DeleteContainerSeconds(FileDecls);
}
void ASTUnit::CleanTemporaryFiles() {
// getOnDiskData(this).CleanTemporaryFiles(); // HLSL Change - no temporary files generated
}
void ASTUnit::addTemporaryFile(StringRef TempFile) {
// getOnDiskData(this).TemporaryFiles.push_back(TempFile); // HLSL Change - no temporary files generated
assert("caller attempted to create a temporary file");
}
/// \brief After failing to build a precompiled preamble (due to
/// errors in the source that occur in the preamble), the number of
/// reparses during which we'll skip even trying to precompile the
/// preamble.
//const unsigned DefaultPreambleRebuildInterval = 5; // HLSL Change - no support for PCH
/// \brief Tracks the number of ASTUnit objects that are currently active.
///
/// Used for debugging purposes only.
static std::atomic<unsigned> ActiveASTUnitObjects;
ASTUnit::ASTUnit(bool _MainFileIsAST)
: Reader(nullptr), HadModuleLoaderFatalFailure(false),
OnlyLocalDecls(false), CaptureDiagnostics(false),
MainFileIsAST(_MainFileIsAST),
TUKind(TU_Complete), WantTiming(getenv("LIBCLANG_TIMING")),
OwnsRemappedFileBuffers(true),
NumStoredDiagnosticsFromDriver(0),
PreambleRebuildCounter(0),
HlslLangExtensions(nullptr), // HLSL Change
NumWarningsInPreamble(0),
ShouldCacheCodeCompletionResults(false),
IncludeBriefCommentsInCodeCompletion(false), UserFilesAreVolatile(false),
CompletionCacheTopLevelHashValue(0),
PreambleTopLevelHashValue(0),
CurrentTopLevelHashValue(0),
UnsafeToFree(false) {
if (getenv("LIBCLANG_OBJTRACKING"))
fprintf(stderr, "+++ %u translation units\n", ++ActiveASTUnitObjects);
}
ASTUnit::~ASTUnit() {
// If we loaded from an AST file, balance out the BeginSourceFile call.
if (MainFileIsAST && getDiagnostics().getClient()) {
getDiagnostics().getClient()->EndSourceFile();
}
clearFileLevelDecls();
// Clean up the temporary files and the preamble file.
// removeOnDiskEntry(this); // HLSL Change - no temporary/preamble files generated.
// Free the buffers associated with remapped files. We are required to
// perform this operation here because we explicitly request that the
// compiler instance *not* free these buffers for each invocation of the
// parser.
if (Invocation.get() && OwnsRemappedFileBuffers) {
PreprocessorOptions &PPOpts = Invocation->getPreprocessorOpts();
for (const auto &RB : PPOpts.RemappedFileBuffers)
delete RB.second;
}
ClearCachedCompletionResults();
if (getenv("LIBCLANG_OBJTRACKING"))
fprintf(stderr, "--- %u translation units\n", --ActiveASTUnitObjects);
}
void ASTUnit::setPreprocessor(Preprocessor *pp) { PP = pp; }
/// \brief Determine the set of code-completion contexts in which this
/// declaration should be shown.
static unsigned getDeclShowContexts(const NamedDecl *ND,
const LangOptions &LangOpts,
bool &IsNestedNameSpecifier) {
IsNestedNameSpecifier = false;
if (isa<UsingShadowDecl>(ND))
ND = dyn_cast<NamedDecl>(ND->getUnderlyingDecl());
if (!ND)
return 0;
uint64_t Contexts = 0;
if (isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND) ||
isa<ClassTemplateDecl>(ND) || isa<TemplateTemplateParmDecl>(ND)) {
// Types can appear in these contexts.
if (LangOpts.CPlusPlus || !isa<TagDecl>(ND))
Contexts |= (1LL << CodeCompletionContext::CCC_TopLevel)
| (1LL << CodeCompletionContext::CCC_ObjCIvarList)
| (1LL << CodeCompletionContext::CCC_ClassStructUnion)
| (1LL << CodeCompletionContext::CCC_Statement)
| (1LL << CodeCompletionContext::CCC_Type)
| (1LL << CodeCompletionContext::CCC_ParenthesizedExpression);
// In C++, types can appear in expressions contexts (for functional casts).
if (LangOpts.CPlusPlus)
Contexts |= (1LL << CodeCompletionContext::CCC_Expression);
// In Objective-C, message sends can send interfaces. In Objective-C++,
// all types are available due to functional casts.
if (LangOpts.CPlusPlus || isa<ObjCInterfaceDecl>(ND))
Contexts |= (1LL << CodeCompletionContext::CCC_ObjCMessageReceiver);
// In Objective-C, you can only be a subclass of another Objective-C class
if (isa<ObjCInterfaceDecl>(ND))
Contexts |= (1LL << CodeCompletionContext::CCC_ObjCInterfaceName);
// Deal with tag names.
if (isa<EnumDecl>(ND)) {
Contexts |= (1LL << CodeCompletionContext::CCC_EnumTag);
// Part of the nested-name-specifier in C++0x.
if (LangOpts.CPlusPlus11)
IsNestedNameSpecifier = true;
} else if (const RecordDecl *Record = dyn_cast<RecordDecl>(ND)) {
if (Record->isUnion())
Contexts |= (1LL << CodeCompletionContext::CCC_UnionTag);
else
Contexts |= (1LL << CodeCompletionContext::CCC_ClassOrStructTag);
if (LangOpts.CPlusPlus)
IsNestedNameSpecifier = true;
} else if (isa<ClassTemplateDecl>(ND))
IsNestedNameSpecifier = true;
} else if (isa<ValueDecl>(ND) || isa<FunctionTemplateDecl>(ND)) {
// Values can appear in these contexts.
Contexts = (1LL << CodeCompletionContext::CCC_Statement)
| (1LL << CodeCompletionContext::CCC_Expression)
| (1LL << CodeCompletionContext::CCC_ParenthesizedExpression)
| (1LL << CodeCompletionContext::CCC_ObjCMessageReceiver);
} else if (isa<ObjCProtocolDecl>(ND)) {
Contexts = (1LL << CodeCompletionContext::CCC_ObjCProtocolName);
} else if (isa<ObjCCategoryDecl>(ND)) {
Contexts = (1LL << CodeCompletionContext::CCC_ObjCCategoryName);
} else if (isa<NamespaceDecl>(ND) || isa<NamespaceAliasDecl>(ND)) {
Contexts = (1LL << CodeCompletionContext::CCC_Namespace);
// Part of the nested-name-specifier.
IsNestedNameSpecifier = true;
}
return Contexts;
}
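/// \brief Cache the current set of global code-completion results so that
/// subsequent completions can reuse them without re-running global lookup.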
void ASTUnit::CacheCodeCompletionResults() {
if (!TheSema)
return;
SimpleTimer Timer(WantTiming);
Timer.setOutput("Cache global code completions for " + getMainFileName());
// Clear out the previous results.
ClearCachedCompletionResults();
// Gather the set of global code completions.
typedef CodeCompletionResult Result;
SmallVector<Result, 8> Results;
CachedCompletionAllocator = new GlobalCodeCompletionAllocator;
CodeCompletionTUInfo CCTUInfo(CachedCompletionAllocator);
TheSema->GatherGlobalCodeCompletions(*CachedCompletionAllocator,
CCTUInfo, Results);
// Translate global code completions into cached completions.
llvm::DenseMap<CanQualType, unsigned> CompletionTypes;
CodeCompletionContext CCContext(CodeCompletionContext::CCC_TopLevel);
for (Result &R : Results) {
switch (R.Kind) {
case Result::RK_Declaration: {
bool IsNestedNameSpecifier = false;
CachedCodeCompletionResult CachedResult;
CachedResult.Completion = R.CreateCodeCompletionString(
*TheSema, CCContext, *CachedCompletionAllocator, CCTUInfo,
IncludeBriefCommentsInCodeCompletion);
CachedResult.ShowInContexts = getDeclShowContexts(
R.Declaration, Ctx->getLangOpts(), IsNestedNameSpecifier);
CachedResult.Priority = R.Priority;
CachedResult.Kind = R.CursorKind;
CachedResult.Availability = R.Availability;
// Keep track of the type of this completion in an ASTContext-agnostic
// way.
QualType UsageType = getDeclUsageType(*Ctx, R.Declaration);
if (UsageType.isNull()) {
CachedResult.TypeClass = STC_Void;
CachedResult.Type = 0;
} else {
CanQualType CanUsageType
= Ctx->getCanonicalType(UsageType.getUnqualifiedType());
CachedResult.TypeClass = getSimplifiedTypeClass(CanUsageType);
// Determine whether we have already seen this type. If so, we save
// ourselves the work of formatting the type string by using the
// temporary, CanQualType-based hash table to find the associated value.
unsigned &TypeValue = CompletionTypes[CanUsageType];
if (TypeValue == 0) {
TypeValue = CompletionTypes.size();
CachedCompletionTypes[QualType(CanUsageType).getAsString()]
= TypeValue;
}
CachedResult.Type = TypeValue;
}
CachedCompletionResults.push_back(CachedResult);
/// Handle nested-name-specifiers in C++.
if (TheSema->Context.getLangOpts().CPlusPlus && IsNestedNameSpecifier &&
!R.StartsNestedNameSpecifier) {
// The contexts in which a nested-name-specifier can appear in C++.
uint64_t NNSContexts
= (1LL << CodeCompletionContext::CCC_TopLevel)
| (1LL << CodeCompletionContext::CCC_ObjCIvarList)
| (1LL << CodeCompletionContext::CCC_ClassStructUnion)
| (1LL << CodeCompletionContext::CCC_Statement)
| (1LL << CodeCompletionContext::CCC_Expression)
| (1LL << CodeCompletionContext::CCC_ObjCMessageReceiver)
| (1LL << CodeCompletionContext::CCC_EnumTag)
| (1LL << CodeCompletionContext::CCC_UnionTag)
| (1LL << CodeCompletionContext::CCC_ClassOrStructTag)
| (1LL << CodeCompletionContext::CCC_Type)
| (1LL << CodeCompletionContext::CCC_PotentiallyQualifiedName)
| (1LL << CodeCompletionContext::CCC_ParenthesizedExpression);
if (isa<NamespaceDecl>(R.Declaration) ||
isa<NamespaceAliasDecl>(R.Declaration))
NNSContexts |= (1LL << CodeCompletionContext::CCC_Namespace);
if (unsigned RemainingContexts
= NNSContexts & ~CachedResult.ShowInContexts) {
// If there any contexts where this completion can be a
// nested-name-specifier but isn't already an option, create a
// nested-name-specifier completion.
R.StartsNestedNameSpecifier = true;
CachedResult.Completion = R.CreateCodeCompletionString(
*TheSema, CCContext, *CachedCompletionAllocator, CCTUInfo,
IncludeBriefCommentsInCodeCompletion);
CachedResult.ShowInContexts = RemainingContexts;
CachedResult.Priority = CCP_NestedNameSpecifier;
CachedResult.TypeClass = STC_Void;
CachedResult.Type = 0;
CachedCompletionResults.push_back(CachedResult);
}
}
break;
}
case Result::RK_Keyword:
case Result::RK_Pattern:
// Ignore keywords and patterns; we don't care, since they are so
// easily regenerated.
break;
case Result::RK_Macro: {
CachedCodeCompletionResult CachedResult;
CachedResult.Completion = R.CreateCodeCompletionString(
*TheSema, CCContext, *CachedCompletionAllocator, CCTUInfo,
IncludeBriefCommentsInCodeCompletion);
CachedResult.ShowInContexts
= (1LL << CodeCompletionContext::CCC_TopLevel)
| (1LL << CodeCompletionContext::CCC_ObjCInterface)
| (1LL << CodeCompletionContext::CCC_ObjCImplementation)
| (1LL << CodeCompletionContext::CCC_ObjCIvarList)
| (1LL << CodeCompletionContext::CCC_ClassStructUnion)
| (1LL << CodeCompletionContext::CCC_Statement)
| (1LL << CodeCompletionContext::CCC_Expression)
| (1LL << CodeCompletionContext::CCC_ObjCMessageReceiver)
| (1LL << CodeCompletionContext::CCC_MacroNameUse)
| (1LL << CodeCompletionContext::CCC_PreprocessorExpression)
| (1LL << CodeCompletionContext::CCC_ParenthesizedExpression)
| (1LL << CodeCompletionContext::CCC_OtherWithMacros);
CachedResult.Priority = R.Priority;
CachedResult.Kind = R.CursorKind;
CachedResult.Availability = R.Availability;
CachedResult.TypeClass = STC_Void;
CachedResult.Type = 0;
CachedCompletionResults.push_back(CachedResult);
break;
}
}
}
// Save the current top-level hash value.
CompletionCacheTopLevelHashValue = CurrentTopLevelHashValue;
}
void ASTUnit::ClearCachedCompletionResults() {
CachedCompletionResults.clear();
CachedCompletionTypes.clear();
CachedCompletionAllocator = nullptr;
}
namespace {
#if 0 // HLSL Change Starts - no support for AST serialization
/// \brief Gathers information from ASTReader that will be used to initialize
/// a Preprocessor.
class ASTInfoCollector : public ASTReaderListener {
Preprocessor &PP;
ASTContext &Context;
LangOptions &LangOpt;
std::shared_ptr<TargetOptions> &TargetOpts;
IntrusiveRefCntPtr<TargetInfo> &Target;
unsigned &Counter;
bool InitializedLanguage;
public:
ASTInfoCollector(Preprocessor &PP, ASTContext &Context, LangOptions &LangOpt,
std::shared_ptr<TargetOptions> &TargetOpts,
IntrusiveRefCntPtr<TargetInfo> &Target, unsigned &Counter)
: PP(PP), Context(Context), LangOpt(LangOpt), TargetOpts(TargetOpts),
Target(Target), Counter(Counter), InitializedLanguage(false) {}
bool ReadLanguageOptions(const LangOptions &LangOpts, bool Complain,
bool AllowCompatibleDifferences) override {
if (InitializedLanguage)
return false;
LangOpt = LangOpts;
InitializedLanguage = true;
updated();
return false;
}
bool ReadTargetOptions(const TargetOptions &TargetOpts, bool Complain,
bool AllowCompatibleDifferences) override {
// If we've already initialized the target, don't do it again.
if (Target)
return false;
this->TargetOpts = std::make_shared<TargetOptions>(TargetOpts);
Target =
TargetInfo::CreateTargetInfo(PP.getDiagnostics(), this->TargetOpts);
updated();
return false;
}
void ReadCounter(const serialization::ModuleFile &M,
unsigned Value) override {
Counter = Value;
}
private:
void updated() {
if (!Target || !InitializedLanguage)
return;
// Inform the target of the language options.
//
// FIXME: We shouldn't need to do this, the target should be immutable once
// created. This complexity should be lifted elsewhere.
Target->adjust(LangOpt);
// Initialize the preprocessor.
PP.Initialize(*Target);
// Initialize the ASTContext
Context.InitBuiltinTypes(*Target);
// We didn't have access to the comment options when the ASTContext was
// constructed, so register them now.
Context.getCommentCommandTraits().registerCommentOptions(
LangOpt.CommentOpts);
}
};
#endif // HLSL Change Ends - no support for AST serialization
/// \brief Diagnostic consumer that saves each diagnostic it is given.
class StoredDiagnosticConsumer : public DiagnosticConsumer {
SmallVectorImpl<StoredDiagnostic> &StoredDiags;
SourceManager *SourceMgr;
public:
explicit StoredDiagnosticConsumer(
SmallVectorImpl<StoredDiagnostic> &StoredDiags)
: StoredDiags(StoredDiags), SourceMgr(nullptr) {}
void BeginSourceFile(const LangOptions &LangOpts,
const Preprocessor *PP = nullptr) override {
if (PP)
SourceMgr = &PP->getSourceManager();
}
void HandleDiagnostic(DiagnosticsEngine::Level Level,
const Diagnostic &Info) override;
};
/// \brief RAII object that optionally captures diagnostics, if
/// there is no diagnostic client to capture them already.
class CaptureDroppedDiagnostics {
DiagnosticsEngine &Diags;
StoredDiagnosticConsumer Client;
DiagnosticConsumer *PreviousClient;
std::unique_ptr<DiagnosticConsumer> OwningPreviousClient;
public:
CaptureDroppedDiagnostics(bool RequestCapture, DiagnosticsEngine &Diags,
SmallVectorImpl<StoredDiagnostic> &StoredDiags)
: Diags(Diags), Client(StoredDiags), PreviousClient(nullptr)
{
if (RequestCapture || Diags.getClient() == nullptr) {
OwningPreviousClient = Diags.takeClient();
PreviousClient = Diags.getClient();
Diags.setClient(&Client, false);
}
}
~CaptureDroppedDiagnostics() {
if (Diags.getClient() == &Client)
Diags.setClient(PreviousClient, !!OwningPreviousClient.release());
}
};
} // anonymous namespace
void StoredDiagnosticConsumer::HandleDiagnostic(DiagnosticsEngine::Level Level,
const Diagnostic &Info) {
// Default implementation (Warnings/errors count).
DiagnosticConsumer::HandleDiagnostic(Level, Info);
// Only record the diagnostic if it's part of the source manager we know
// about. This effectively drops diagnostics from modules we're building.
  // FIXME: In the long run, we don't want to drop source managers from modules.
if (!Info.hasSourceManager() || &Info.getSourceManager() == SourceMgr)
StoredDiags.emplace_back(Level, Info);
}
#if 1 // HLSL Change Starts - no support for AST serialization
ASTMutationListener *ASTUnit::getASTMutationListener() {
return nullptr;
}
#else
ASTMutationListener *ASTUnit::getASTMutationListener() {
if (WriterData)
return &WriterData->Writer;
return nullptr;
}
ASTDeserializationListener *ASTUnit::getDeserializationListener() {
if (WriterData)
return &WriterData->Writer;
return nullptr;
}
#endif // HLSL Change Ends - no support for AST serialization
std::unique_ptr<llvm::MemoryBuffer>
ASTUnit::getBufferForFile(StringRef Filename, std::string *ErrorStr) {
assert(FileMgr);
auto Buffer = FileMgr->getBufferForFile(Filename);
if (Buffer)
return std::move(*Buffer);
if (ErrorStr)
*ErrorStr = Buffer.getError().message();
return nullptr;
}
/// \brief Configure the diagnostics object for use with ASTUnit.
void ASTUnit::ConfigureDiags(IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
ASTUnit &AST, bool CaptureDiagnostics, bool VerifyDiagnostics) {
assert(Diags.get() && "no DiagnosticsEngine was provided");
if (CaptureDiagnostics) {
Diags->setClient(new StoredDiagnosticConsumer(AST.StoredDiagnostics));
// HLSL Change Starts
if (VerifyDiagnostics) {
Diags->setClient(new VerifyDiagnosticConsumer(*Diags));
}
// HLSL Change Ends
}
}
#if 0 // HLSL Change Starts - no support for AST serialization
std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
const std::string &Filename,
const PCHContainerReader &PCHContainerRdr,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
const FileSystemOptions &FileSystemOpts, bool OnlyLocalDecls,
ArrayRef<RemappedFile> RemappedFiles, bool CaptureDiagnostics,
bool AllowPCHWithCompilerErrors, bool UserFilesAreVolatile) {
std::unique_ptr<ASTUnit> AST(new ASTUnit(true));
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
ASTUnitCleanup(AST.get());
llvm::CrashRecoveryContextCleanupRegistrar<DiagnosticsEngine,
llvm::CrashRecoveryContextReleaseRefCleanup<DiagnosticsEngine> >
DiagCleanup(Diags.get());
ConfigureDiags(Diags, *AST, CaptureDiagnostics);
AST->OnlyLocalDecls = OnlyLocalDecls;
AST->CaptureDiagnostics = CaptureDiagnostics;
AST->Diagnostics = Diags;
IntrusiveRefCntPtr<vfs::FileSystem> VFS = vfs::getRealFileSystem();
AST->FileMgr = new FileManager(FileSystemOpts, VFS);
AST->UserFilesAreVolatile = UserFilesAreVolatile;
AST->SourceMgr = new SourceManager(AST->getDiagnostics(),
AST->getFileManager(),
UserFilesAreVolatile);
AST->HSOpts = new HeaderSearchOptions();
AST->HSOpts->ModuleFormat = PCHContainerRdr.getFormat();
AST->HeaderInfo.reset(new HeaderSearch(AST->HSOpts,
AST->getSourceManager(),
AST->getDiagnostics(),
AST->ASTFileLangOpts,
/*Target=*/nullptr));
PreprocessorOptions *PPOpts = new PreprocessorOptions();
for (const auto &RemappedFile : RemappedFiles)
PPOpts->addRemappedFile(RemappedFile.first, RemappedFile.second);
// Gather Info for preprocessor construction later on.
HeaderSearch &HeaderInfo = *AST->HeaderInfo;
unsigned Counter;
AST->PP =
new Preprocessor(PPOpts, AST->getDiagnostics(), AST->ASTFileLangOpts,
AST->getSourceManager(), HeaderInfo, *AST,
/*IILookup=*/nullptr,
/*OwnsHeaderSearch=*/false);
Preprocessor &PP = *AST->PP;
AST->Ctx = new ASTContext(AST->ASTFileLangOpts, AST->getSourceManager(),
PP.getIdentifierTable(), PP.getSelectorTable(),
PP.getBuiltinInfo());
ASTContext &Context = *AST->Ctx;
bool disableValid = false;
if (::getenv("LIBCLANG_DISABLE_PCH_VALIDATION"))
disableValid = true;
AST->Reader = new ASTReader(PP, Context, PCHContainerRdr,
/*isysroot=*/"",
/*DisableValidation=*/disableValid,
AllowPCHWithCompilerErrors);
AST->Reader->setListener(llvm::make_unique<ASTInfoCollector>(
*AST->PP, Context, AST->ASTFileLangOpts, AST->TargetOpts, AST->Target,
Counter));
// Attach the AST reader to the AST context as an external AST
// source, so that declarations will be deserialized from the
// AST file as needed.
// We need the external source to be set up before we read the AST, because
// eagerly-deserialized declarations may use it.
Context.setExternalSource(AST->Reader);
switch (AST->Reader->ReadAST(Filename, serialization::MK_MainFile,
SourceLocation(), ASTReader::ARR_None)) {
case ASTReader::Success:
break;
case ASTReader::Failure:
case ASTReader::Missing:
case ASTReader::OutOfDate:
case ASTReader::VersionMismatch:
case ASTReader::ConfigurationMismatch:
case ASTReader::HadErrors:
AST->getDiagnostics().Report(diag::err_fe_unable_to_load_pch);
return nullptr;
}
AST->OriginalSourceFile = AST->Reader->getOriginalSourceFile();
PP.setCounterValue(Counter);
// Create an AST consumer, even though it isn't used.
AST->Consumer.reset(new ASTConsumer);
// Create a semantic analysis object and tell the AST reader about it.
AST->TheSema.reset(new Sema(PP, Context, *AST->Consumer));
AST->TheSema->Initialize();
AST->Reader->InitializeSema(*AST->TheSema);
// Tell the diagnostic client that we have started a source file.
AST->getDiagnostics().getClient()->BeginSourceFile(Context.getLangOpts(),&PP);
return AST;
}
#endif // HLSL Change Ends - no support for AST serialization
namespace {
/// \brief Preprocessor callback class that updates a hash value with the names
/// of all macros that have been defined by the translation unit.
class MacroDefinitionTrackerPPCallbacks : public PPCallbacks {
unsigned &Hash;
public:
explicit MacroDefinitionTrackerPPCallbacks(unsigned &Hash) : Hash(Hash) { }
void MacroDefined(const Token &MacroNameTok,
const MacroDirective *MD) override {
Hash = llvm::HashString(MacroNameTok.getIdentifierInfo()->getName(), Hash);
}
};
/// \brief Add the given declaration to the hash of all top-level entities.
void AddTopLevelDeclarationToHash(Decl *D, unsigned &Hash) {
if (!D)
return;
DeclContext *DC = D->getDeclContext();
if (!DC)
return;
if (!(DC->isTranslationUnit() || DC->getLookupParent()->isTranslationUnit()))
return;
if (NamedDecl *ND = dyn_cast<NamedDecl>(D)) {
if (EnumDecl *EnumD = dyn_cast<EnumDecl>(D)) {
// For an unscoped enum include the enumerators in the hash since they
// enter the top-level namespace.
if (!EnumD->isScoped()) {
for (const auto *EI : EnumD->enumerators()) {
if (EI->getIdentifier())
Hash = llvm::HashString(EI->getIdentifier()->getName(), Hash);
}
}
}
if (ND->getIdentifier())
Hash = llvm::HashString(ND->getIdentifier()->getName(), Hash);
else if (DeclarationName Name = ND->getDeclName()) {
std::string NameStr = Name.getAsString();
Hash = llvm::HashString(NameStr, Hash);
}
return;
}
if (ImportDecl *ImportD = dyn_cast<ImportDecl>(D)) {
if (Module *Mod = ImportD->getImportedModule()) {
std::string ModName = Mod->getFullModuleName();
Hash = llvm::HashString(ModName, Hash);
}
return;
}
}
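/// \brief Consumer that registers each top-level declaration with the owning
/// ASTUnit and folds its name into the running top-level hash.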
class TopLevelDeclTrackerConsumer : public ASTConsumer {
ASTUnit &Unit;
unsigned &Hash;
public:
TopLevelDeclTrackerConsumer(ASTUnit &_Unit, unsigned &Hash)
: Unit(_Unit), Hash(Hash) {
Hash = 0;
}
void handleTopLevelDecl(Decl *D) {
if (!D)
return;
// FIXME: Currently ObjC method declarations are incorrectly being
// reported as top-level declarations, even though their DeclContext
// is the containing ObjC @interface/@implementation. This is a
// fundamental problem in the parser right now.
if (isa<ObjCMethodDecl>(D))
return;
AddTopLevelDeclarationToHash(D, Hash);
Unit.addTopLevelDecl(D);
handleFileLevelDecl(D);
}
void handleFileLevelDecl(Decl *D) {
Unit.addFileLevelDecl(D);
if (NamespaceDecl *NSD = dyn_cast<NamespaceDecl>(D)) {
for (auto *I : NSD->decls())
handleFileLevelDecl(I);
}
}
bool HandleTopLevelDecl(DeclGroupRef D) override {
for (Decl *TopLevelDecl : D)
handleTopLevelDecl(TopLevelDecl);
return true;
}
// We're not interested in "interesting" decls.
void HandleInterestingDecl(DeclGroupRef) override {}
void HandleTopLevelDeclInObjCContainer(DeclGroupRef D) override {
for (Decl *TopLevelDecl : D)
handleTopLevelDecl(TopLevelDecl);
}
ASTMutationListener *GetASTMutationListener() override {
return Unit.getASTMutationListener();
}
ASTDeserializationListener *GetASTDeserializationListener() override {
return nullptr; // return Unit.getDeserializationListener(); // HLSL Change - no support for serialization
}
};
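/// \brief Frontend action that parses a full translation unit while tracking
/// top-level declarations via TopLevelDeclTrackerConsumer.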
class TopLevelDeclTrackerAction : public ASTFrontendAction {
public:
ASTUnit &Unit;
std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
StringRef InFile) override {
CI.getPreprocessor().addPPCallbacks(
llvm::make_unique<MacroDefinitionTrackerPPCallbacks>(
Unit.getCurrentTopLevelHashValue()));
return llvm::make_unique<TopLevelDeclTrackerConsumer>(
Unit, Unit.getCurrentTopLevelHashValue());
}
public:
TopLevelDeclTrackerAction(ASTUnit &_Unit) : Unit(_Unit) {}
bool hasCodeCompletionSupport() const override { return false; }
TranslationUnitKind getTranslationUnitKind() override {
return Unit.getTranslationUnitKind();
}
};
#if 0 // HLSL Change Starts - no support for PCH
class PrecompilePreambleAction : public ASTFrontendAction {
ASTUnit &Unit;
bool HasEmittedPreamblePCH;
public:
explicit PrecompilePreambleAction(ASTUnit &Unit)
: Unit(Unit), HasEmittedPreamblePCH(false) {}
std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
StringRef InFile) override;
bool hasEmittedPreamblePCH() const { return HasEmittedPreamblePCH; }
void setHasEmittedPreamblePCH() { HasEmittedPreamblePCH = true; }
bool shouldEraseOutputFiles() override { return !hasEmittedPreamblePCH(); }
bool hasCodeCompletionSupport() const override { return false; }
bool hasASTFileSupport() const override { return false; }
TranslationUnitKind getTranslationUnitKind() override { return TU_Prefix; }
};
class PrecompilePreambleConsumer : public PCHGenerator {
ASTUnit &Unit;
unsigned &Hash;
std::vector<Decl *> TopLevelDecls;
PrecompilePreambleAction *Action;
raw_ostream *Out;
public:
PrecompilePreambleConsumer(ASTUnit &Unit, PrecompilePreambleAction *Action,
const Preprocessor &PP, StringRef isysroot,
raw_ostream *Out)
: PCHGenerator(PP, "", nullptr, isysroot, std::make_shared<PCHBuffer>(),
/*AllowASTWithErrors=*/true),
Unit(Unit), Hash(Unit.getCurrentTopLevelHashValue()), Action(Action),
Out(Out) {
Hash = 0;
}
bool HandleTopLevelDecl(DeclGroupRef DG) override {
for (Decl *D : DG) {
// FIXME: Currently ObjC method declarations are incorrectly being
// reported as top-level declarations, even though their DeclContext
// is the containing ObjC @interface/@implementation. This is a
// fundamental problem in the parser right now.
if (isa<ObjCMethodDecl>(D))
continue;
AddTopLevelDeclarationToHash(D, Hash);
TopLevelDecls.push_back(D);
}
return true;
}
void HandleTranslationUnit(ASTContext &Ctx) override {
PCHGenerator::HandleTranslationUnit(Ctx);
if (hasEmittedPCH()) {
// Write the generated bitstream to "Out".
*Out << getPCH();
// Make sure it hits disk now.
Out->flush();
// Free the buffer.
llvm::SmallVector<char, 0> Empty;
getPCH() = std::move(Empty);
// Translate the top-level declarations we captured during
// parsing into declaration IDs in the precompiled
// preamble. This will allow us to deserialize those top-level
// declarations when requested.
for (Decl *D : TopLevelDecls) {
// Invalid top-level decls may not have been serialized.
if (D->isInvalidDecl())
continue;
Unit.addTopLevelDeclFromPreamble(getWriter().getDeclID(D));
}
Action->setHasEmittedPreamblePCH();
}
}
};
#endif // HLSL Change Ends - no support for PCH
}
#if 0 // HLSL Change Starts - no support for PCH
std::unique_ptr<ASTConsumer>
PrecompilePreambleAction::CreateASTConsumer(CompilerInstance &CI,
StringRef InFile) {
std::string Sysroot;
std::string OutputFile;
raw_ostream *OS = GeneratePCHAction::ComputeASTConsumerArguments(
CI, InFile, Sysroot, OutputFile);
if (!OS)
return nullptr;
if (!CI.getFrontendOpts().RelocatablePCH)
Sysroot.clear();
CI.getPreprocessor().addPPCallbacks(
llvm::make_unique<MacroDefinitionTrackerPPCallbacks>(
Unit.getCurrentTopLevelHashValue()));
return llvm::make_unique<PrecompilePreambleConsumer>(
Unit, this, CI.getPreprocessor(), Sysroot, OS);
}
#endif // HLSL Change Ends - no support for PCH
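/// \brief Diagnostics emitted by the driver carry no source location, so a
/// valid location marks a non-driver diagnostic.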
static bool isNonDriverDiag(const StoredDiagnostic &StoredDiag) {
return StoredDiag.getLocation().isValid();
}
static void
checkAndRemoveNonDriverDiags(SmallVectorImpl<StoredDiagnostic> &StoredDiags) {
// Get rid of stored diagnostics except the ones from the driver which do not
// have a source location.
StoredDiags.erase(
std::remove_if(StoredDiags.begin(), StoredDiags.end(), isNonDriverDiag),
StoredDiags.end());
}
static void checkAndSanitizeDiags(SmallVectorImpl<StoredDiagnostic> &
StoredDiagnostics,
SourceManager &SM) {
  // The stored diagnostic has the old source manager in it; update
  // the locations to refer into the new source manager. Since we've
  // been careful to keep the source manager's state identical before
  // and after, we can reuse the source locations themselves.
for (StoredDiagnostic &SD : StoredDiagnostics) {
if (SD.getLocation().isValid()) {
FullSourceLoc Loc(SD.getLocation(), SM);
SD.setLocation(Loc);
}
}
}
/// Parse the source file into a translation unit using the given compiler
/// invocation, replacing the current translation unit.
///
/// \returns True if a failure occurred that causes the ASTUnit not to
/// contain any translation-unit information, false otherwise.
bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
std::unique_ptr<llvm::MemoryBuffer> OverrideMainBuffer) {
SavedMainFileBuffer.reset();
if (!Invocation)
return true;
// Create the compiler instance to use for building the AST.
std::unique_ptr<CompilerInstance> Clang(
new CompilerInstance(PCHContainerOps));
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<CompilerInstance>
CICleanup(Clang.get());
IntrusiveRefCntPtr<CompilerInvocation>
CCInvocation(new CompilerInvocation(*Invocation));
Clang->HlslLangExtensions = HlslLangExtensions; // HLSL Change
Clang->setInvocation(CCInvocation.get());
OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].getFile();
// Set up diagnostics, capturing any diagnostics that would
// otherwise be dropped.
Clang->setDiagnostics(&getDiagnostics());
// Create the target instance.
Clang->setTarget(TargetInfo::CreateTargetInfo(
Clang->getDiagnostics(), Clang->getInvocation().TargetOpts));
if (!Clang->hasTarget())
return true;
// Inform the target of the language options.
//
// FIXME: We shouldn't need to do this, the target should be immutable once
// created. This complexity should be lifted elsewhere.
Clang->getTarget().adjust(Clang->getLangOpts());
assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
"Invocation must have exactly one source file!");
assert(Clang->getFrontendOpts().Inputs[0].getKind() != IK_AST &&
"FIXME: AST inputs not yet supported here!");
assert(Clang->getFrontendOpts().Inputs[0].getKind() != IK_LLVM_IR &&
"IR inputs not support here!");
// Configure the various subsystems.
LangOpts = Clang->getInvocation().LangOpts;
FileSystemOpts = Clang->getFileSystemOpts();
IntrusiveRefCntPtr<vfs::FileSystem> VFS =
createVFSFromCompilerInvocation(Clang->getInvocation(), getDiagnostics());
if (!VFS)
return true;
FileMgr = new FileManager(FileSystemOpts, VFS);
SourceMgr = new SourceManager(getDiagnostics(), *FileMgr,
UserFilesAreVolatile);
TheSema.reset();
Ctx = nullptr;
PP = nullptr;
Reader = nullptr;
// Clear out old caches and data.
TopLevelDecls.clear();
clearFileLevelDecls();
CleanTemporaryFiles();
if (!OverrideMainBuffer) {
checkAndRemoveNonDriverDiags(StoredDiagnostics);
TopLevelDeclsInPreamble.clear();
}
// Create a file manager object to provide access to and cache the filesystem.
Clang->setFileManager(&getFileManager());
// Create the source manager.
Clang->setSourceManager(&getSourceManager());
// If the main file has been overridden due to the use of a preamble,
// make that override happen and introduce the preamble.
PreprocessorOptions &PreprocessorOpts = Clang->getPreprocessorOpts();
if (OverrideMainBuffer) {
PreprocessorOpts.addRemappedFile(OriginalSourceFile,
OverrideMainBuffer.get());
PreprocessorOpts.PrecompiledPreambleBytes.first = Preamble.size();
PreprocessorOpts.PrecompiledPreambleBytes.second
= PreambleEndsAtStartOfLine;
PreprocessorOpts.ImplicitPCHInclude = getPreambleFile(this);
PreprocessorOpts.DisablePCHValidation = true;
    // The stored diagnostic has the old source manager in it; update
    // the locations to refer into the new source manager. Since we've
    // been careful to keep the source manager's state identical before
    // and after, we can reuse the source locations themselves.
checkAndSanitizeDiags(StoredDiagnostics, getSourceManager());
    // Keep track of the override buffer.
SavedMainFileBuffer = std::move(OverrideMainBuffer);
}
std::unique_ptr<TopLevelDeclTrackerAction> Act(
new TopLevelDeclTrackerAction(*this));
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<TopLevelDeclTrackerAction>
ActCleanup(Act.get());
if (!Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0]))
goto error;
if (SavedMainFileBuffer) {
std::string ModName = getPreambleFile(this);
TranslateStoredDiagnostics(getFileManager(), getSourceManager(),
PreambleDiagnostics, StoredDiagnostics);
}
if (!Act->Execute())
goto error;
transferASTDataFromCompilerInstance(*Clang);
Act->EndSourceFile();
FailedParseDiagnostics.clear();
return false;
error:
// Remove the overridden buffer we used for the preamble.
SavedMainFileBuffer = nullptr;
// Keep the ownership of the data in the ASTUnit because the client may
// want to see the diagnostics.
transferASTDataFromCompilerInstance(*Clang);
FailedParseDiagnostics.swap(StoredDiagnostics);
StoredDiagnostics.clear();
NumStoredDiagnosticsFromDriver = 0;
return true;
}
#if 0 // HLSL Change Starts - no support for PCH
/// \brief Simple function to retrieve a path for a preamble precompiled header.
static std::string GetPreamblePCHPath() {
// FIXME: This is a hack so that we can override the preamble file during
// crash-recovery testing, which is the only case where the preamble files
// are not necessarily cleaned up.
const char *TmpFile = ::getenv("CINDEXTEST_PREAMBLE_FILE");
if (TmpFile)
return TmpFile;
SmallString<128> Path;
llvm::sys::fs::createTemporaryFile("preamble", "pch", Path);
return Path.str();
}
#endif // HLSL Change Ends - no support for PCH
/// \brief Compute the preamble for the main file, providing the source buffer
/// that corresponds to the main file along with a pair (bytes, start-of-line)
/// that describes the preamble.
ASTUnit::ComputedPreamble
ASTUnit::ComputePreamble(CompilerInvocation &Invocation, unsigned MaxLines) {
FrontendOptions &FrontendOpts = Invocation.getFrontendOpts();
PreprocessorOptions &PreprocessorOpts = Invocation.getPreprocessorOpts();
// Try to determine if the main file has been remapped, either from the
// command line (to another file) or directly through the compiler invocation
// (to a memory buffer).
llvm::MemoryBuffer *Buffer = nullptr;
std::unique_ptr<llvm::MemoryBuffer> BufferOwner;
std::string MainFilePath(FrontendOpts.Inputs[0].getFile());
llvm::sys::fs::UniqueID MainFileID;
if (!llvm::sys::fs::getUniqueID(MainFilePath, MainFileID)) {
// Check whether there is a file-file remapping of the main file
for (const auto &RF : PreprocessorOpts.RemappedFiles) {
std::string MPath(RF.first);
llvm::sys::fs::UniqueID MID;
if (!llvm::sys::fs::getUniqueID(MPath, MID)) {
if (MainFileID == MID) {
// We found a remapping. Try to load the resulting, remapped source.
BufferOwner = getBufferForFile(RF.second);
if (!BufferOwner)
return ComputedPreamble(nullptr, nullptr, 0, true);
}
}
}
  // Check whether there is a file-buffer remapping. It supersedes the
// file-file remapping.
for (const auto &RB : PreprocessorOpts.RemappedFileBuffers) {
std::string MPath(RB.first);
llvm::sys::fs::UniqueID MID;
if (!llvm::sys::fs::getUniqueID(MPath, MID)) {
if (MainFileID == MID) {
// We found a remapping.
BufferOwner.reset();
Buffer = const_cast<llvm::MemoryBuffer *>(RB.second);
}
}
}
}
// If the main source file was not remapped, load it now.
if (!Buffer && !BufferOwner) {
BufferOwner = getBufferForFile(FrontendOpts.Inputs[0].getFile());
if (!BufferOwner)
return ComputedPreamble(nullptr, nullptr, 0, true);
}
if (!Buffer)
Buffer = BufferOwner.get();
auto Pre = Lexer::ComputePreamble(Buffer->getBuffer(),
*Invocation.getLangOpts(), MaxLines);
return ComputedPreamble(Buffer, std::move(BufferOwner), Pre.first,
Pre.second);
}
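/// \brief Create a hash key for an on-disk file from its size and
/// modification time; the contents are not read, so the MD5 field is zeroed.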
ASTUnit::PreambleFileHash
ASTUnit::PreambleFileHash::createForFile(off_t Size, time_t ModTime) {
PreambleFileHash Result;
Result.Size = Size;
Result.ModTime = ModTime;
memset(Result.MD5, 0, sizeof(Result.MD5));
return Result;
}
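/// \brief Create a hash key for an in-memory buffer. Buffers have no
/// meaningful modification time, so an MD5 of the contents is used instead.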
ASTUnit::PreambleFileHash ASTUnit::PreambleFileHash::createForMemoryBuffer(
const llvm::MemoryBuffer *Buffer) {
PreambleFileHash Result;
Result.Size = Buffer->getBufferSize();
Result.ModTime = 0;
llvm::MD5 MD5Ctx;
  // Hash the full buffer via its StringRef; passing the raw pointer would
  // stop at the first embedded null byte.
  MD5Ctx.update(Buffer->getBuffer());
MD5Ctx.final(Result.MD5);
return Result;
}
namespace clang {
bool operator==(const ASTUnit::PreambleFileHash &LHS,
const ASTUnit::PreambleFileHash &RHS) {
return LHS.Size == RHS.Size && LHS.ModTime == RHS.ModTime &&
memcmp(LHS.MD5, RHS.MD5, sizeof(LHS.MD5)) == 0;
}
} // namespace clang
#if 0 // HLSL Change Starts - no support for PCH
static std::pair<unsigned, unsigned>
makeStandaloneRange(CharSourceRange Range, const SourceManager &SM,
const LangOptions &LangOpts) {
CharSourceRange FileRange = Lexer::makeFileCharRange(Range, SM, LangOpts);
unsigned Offset = SM.getFileOffset(FileRange.getBegin());
unsigned EndOffset = SM.getFileOffset(FileRange.getEnd());
return std::make_pair(Offset, EndOffset);
}
static ASTUnit::StandaloneFixIt makeStandaloneFixIt(const SourceManager &SM,
const LangOptions &LangOpts,
const FixItHint &InFix) {
ASTUnit::StandaloneFixIt OutFix;
OutFix.RemoveRange = makeStandaloneRange(InFix.RemoveRange, SM, LangOpts);
OutFix.InsertFromRange = makeStandaloneRange(InFix.InsertFromRange, SM,
LangOpts);
OutFix.CodeToInsert = InFix.CodeToInsert;
OutFix.BeforePreviousInsertions = InFix.BeforePreviousInsertions;
return OutFix;
}
static ASTUnit::StandaloneDiagnostic
makeStandaloneDiagnostic(const LangOptions &LangOpts,
const StoredDiagnostic &InDiag) {
ASTUnit::StandaloneDiagnostic OutDiag;
OutDiag.ID = InDiag.getID();
OutDiag.Level = InDiag.getLevel();
OutDiag.Message = InDiag.getMessage();
OutDiag.LocOffset = 0;
if (InDiag.getLocation().isInvalid())
return OutDiag;
const SourceManager &SM = InDiag.getLocation().getManager();
SourceLocation FileLoc = SM.getFileLoc(InDiag.getLocation());
OutDiag.Filename = SM.getFilename(FileLoc);
if (OutDiag.Filename.empty())
return OutDiag;
OutDiag.LocOffset = SM.getFileOffset(FileLoc);
for (const CharSourceRange &Range : InDiag.getRanges())
OutDiag.Ranges.push_back(makeStandaloneRange(Range, SM, LangOpts));
for (const FixItHint &FixIt : InDiag.getFixIts())
OutDiag.FixIts.push_back(makeStandaloneFixIt(SM, LangOpts, FixIt));
return OutDiag;
}
#endif // HLSL Change Ends - no support for PCH
/// \brief Attempt to build or re-use a precompiled preamble when (re-)parsing
/// the source file.
///
/// This routine will compute the preamble of the main source file. If a
/// non-trivial preamble is found, it will precompile that preamble into a
/// precompiled header so that the precompiled preamble can be used to reduce
/// reparsing time. If a precompiled preamble has already been constructed,
/// this routine will determine if it is still valid and, if so, avoid
/// rebuilding the precompiled preamble.
///
/// \param AllowRebuild When true (the default), this routine is
/// allowed to rebuild the precompiled preamble if it is found to be
/// out-of-date.
///
/// \param MaxLines When non-zero, the maximum number of lines that
/// can occur within the preamble.
///
/// \returns If the precompiled preamble can be used, returns a newly-allocated
/// buffer that should be used in place of the main file when doing so.
/// Otherwise, returns a NULL pointer.
std::unique_ptr<llvm::MemoryBuffer>
ASTUnit::getMainBufferWithPrecompiledPreamble(
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
const CompilerInvocation &PreambleInvocationIn, bool AllowRebuild,
unsigned MaxLines) {
#if 1 // HLSL Change Starts - no support for PCH
return std::unique_ptr<llvm::MemoryBuffer>();
#else
IntrusiveRefCntPtr<CompilerInvocation>
PreambleInvocation(new CompilerInvocation(PreambleInvocationIn));
FrontendOptions &FrontendOpts = PreambleInvocation->getFrontendOpts();
PreprocessorOptions &PreprocessorOpts
= PreambleInvocation->getPreprocessorOpts();
ComputedPreamble NewPreamble = ComputePreamble(*PreambleInvocation, MaxLines);
if (!NewPreamble.Size) {
// We couldn't find a preamble in the main source. Clear out the current
// preamble, if we have one. It's obviously no good any more.
Preamble.clear();
erasePreambleFile(this);
// The next time we actually see a preamble, precompile it.
PreambleRebuildCounter = 1;
return nullptr;
}
if (!Preamble.empty()) {
// We've previously computed a preamble. Check whether we have the same
// preamble now that we did before, and that there's enough space in
// the main-file buffer within the precompiled preamble to fit the
// new main file.
if (Preamble.size() == NewPreamble.Size &&
PreambleEndsAtStartOfLine == NewPreamble.PreambleEndsAtStartOfLine &&
memcmp(Preamble.getBufferStart(), NewPreamble.Buffer->getBufferStart(),
NewPreamble.Size) == 0) {
// The preamble has not changed. We may be able to re-use the precompiled
// preamble.
// Check that none of the files used by the preamble have changed.
bool AnyFileChanged = false;
// First, make a record of those files that have been overridden via
// remapping or unsaved_files.
llvm::StringMap<PreambleFileHash> OverriddenFiles;
for (const auto &R : PreprocessorOpts.RemappedFiles) {
if (AnyFileChanged)
break;
vfs::Status Status;
if (FileMgr->getNoncachedStatValue(R.second, Status)) {
// If we can't stat the file we're remapping to, assume that something
// horrible happened.
AnyFileChanged = true;
break;
}
OverriddenFiles[R.first] = PreambleFileHash::createForFile(
Status.getSize(), Status.getLastModificationTime().toEpochTime());
}
for (const auto &RB : PreprocessorOpts.RemappedFileBuffers) {
if (AnyFileChanged)
break;
OverriddenFiles[RB.first] =
PreambleFileHash::createForMemoryBuffer(RB.second);
}
// Check whether anything has changed.
for (llvm::StringMap<PreambleFileHash>::iterator
F = FilesInPreamble.begin(), FEnd = FilesInPreamble.end();
!AnyFileChanged && F != FEnd;
++F) {
llvm::StringMap<PreambleFileHash>::iterator Overridden
= OverriddenFiles.find(F->first());
if (Overridden != OverriddenFiles.end()) {
// This file was remapped; check whether the newly-mapped file
// matches up with the previous mapping.
if (Overridden->second != F->second)
AnyFileChanged = true;
continue;
}
// The file was not remapped; check whether it has changed on disk.
vfs::Status Status;
if (FileMgr->getNoncachedStatValue(F->first(), Status)) {
// If we can't stat the file, assume that something horrible happened.
AnyFileChanged = true;
} else if (Status.getSize() != uint64_t(F->second.Size) ||
Status.getLastModificationTime().toEpochTime() !=
uint64_t(F->second.ModTime))
AnyFileChanged = true;
}
if (!AnyFileChanged) {
// Okay! We can re-use the precompiled preamble.
// Set the state of the diagnostic object to mimic its state
// after parsing the preamble.
getDiagnostics().Reset();
ProcessWarningOptions(getDiagnostics(),
PreambleInvocation->getDiagnosticOpts());
getDiagnostics().setNumWarnings(NumWarningsInPreamble);
return llvm::MemoryBuffer::getMemBufferCopy(
NewPreamble.Buffer->getBuffer(), FrontendOpts.Inputs[0].getFile());
}
}
// If we aren't allowed to rebuild the precompiled preamble, just
// return now.
if (!AllowRebuild)
return nullptr;
// We can't reuse the previously-computed preamble. Build a new one.
Preamble.clear();
PreambleDiagnostics.clear();
erasePreambleFile(this);
PreambleRebuildCounter = 1;
} else if (!AllowRebuild) {
// We aren't allowed to rebuild the precompiled preamble; just
// return now.
return nullptr;
}
// If the preamble rebuild counter > 1, it's because we previously
// failed to build a preamble and we're not yet ready to try
// again. Decrement the counter and return a failure.
if (PreambleRebuildCounter > 1) {
--PreambleRebuildCounter;
return nullptr;
}
// Create a temporary file for the precompiled preamble. In rare
// circumstances, this can fail.
std::string PreamblePCHPath = GetPreamblePCHPath();
if (PreamblePCHPath.empty()) {
// Try again next time.
PreambleRebuildCounter = 1;
return nullptr;
}
// We did not previously compute a preamble, or it can't be reused anyway.
SimpleTimer PreambleTimer(WantTiming);
PreambleTimer.setOutput("Precompiling preamble");
// Save the preamble text for later; we'll need to compare against it for
// subsequent reparses.
StringRef MainFilename = FrontendOpts.Inputs[0].getFile();
Preamble.assign(FileMgr->getFile(MainFilename),
NewPreamble.Buffer->getBufferStart(),
NewPreamble.Buffer->getBufferStart() + NewPreamble.Size);
PreambleEndsAtStartOfLine = NewPreamble.PreambleEndsAtStartOfLine;
PreambleBuffer = llvm::MemoryBuffer::getMemBufferCopy(
NewPreamble.Buffer->getBuffer().slice(0, Preamble.size()), MainFilename);
// Remap the main source file to the preamble buffer.
StringRef MainFilePath = FrontendOpts.Inputs[0].getFile();
PreprocessorOpts.addRemappedFile(MainFilePath, PreambleBuffer.get());
// Tell the compiler invocation to generate a temporary precompiled header.
FrontendOpts.ProgramAction = frontend::GeneratePCH;
// FIXME: Generate the precompiled header into memory?
FrontendOpts.OutputFile = PreamblePCHPath;
PreprocessorOpts.PrecompiledPreambleBytes.first = 0;
PreprocessorOpts.PrecompiledPreambleBytes.second = false;
// Create the compiler instance to use for building the precompiled preamble.
std::unique_ptr<CompilerInstance> Clang(
new CompilerInstance(PCHContainerOps));
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<CompilerInstance>
CICleanup(Clang.get());
Clang->setInvocation(&*PreambleInvocation);
OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].getFile();
// Set up diagnostics, capturing all of the diagnostics produced.
Clang->setDiagnostics(&getDiagnostics());
// Create the target instance.
Clang->setTarget(TargetInfo::CreateTargetInfo(
Clang->getDiagnostics(), Clang->getInvocation().TargetOpts));
if (!Clang->hasTarget()) {
llvm::sys::fs::remove(FrontendOpts.OutputFile);
Preamble.clear();
PreambleRebuildCounter = DefaultPreambleRebuildInterval;
PreprocessorOpts.RemappedFileBuffers.pop_back();
return nullptr;
}
// Inform the target of the language options.
//
// FIXME: We shouldn't need to do this, the target should be immutable once
// created. This complexity should be lifted elsewhere.
Clang->getTarget().adjust(Clang->getLangOpts());
assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
"Invocation must have exactly one source file!");
assert(Clang->getFrontendOpts().Inputs[0].getKind() != IK_AST &&
"FIXME: AST inputs not yet supported here!");
assert(Clang->getFrontendOpts().Inputs[0].getKind() != IK_LLVM_IR &&
"IR inputs not support here!");
// Clear out old caches and data.
getDiagnostics().Reset();
ProcessWarningOptions(getDiagnostics(), Clang->getDiagnosticOpts());
checkAndRemoveNonDriverDiags(StoredDiagnostics);
TopLevelDecls.clear();
TopLevelDeclsInPreamble.clear();
PreambleDiagnostics.clear();
IntrusiveRefCntPtr<vfs::FileSystem> VFS =
createVFSFromCompilerInvocation(Clang->getInvocation(), getDiagnostics());
if (!VFS)
return nullptr;
// Create a file manager object to provide access to and cache the filesystem.
Clang->setFileManager(new FileManager(Clang->getFileSystemOpts(), VFS));
// Create the source manager.
Clang->setSourceManager(new SourceManager(getDiagnostics(),
Clang->getFileManager()));
auto PreambleDepCollector = std::make_shared<DependencyCollector>();
Clang->addDependencyCollector(PreambleDepCollector);
std::unique_ptr<PrecompilePreambleAction> Act;
Act.reset(new PrecompilePreambleAction(*this));
if (!Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0])) {
llvm::sys::fs::remove(FrontendOpts.OutputFile);
Preamble.clear();
PreambleRebuildCounter = DefaultPreambleRebuildInterval;
PreprocessorOpts.RemappedFileBuffers.pop_back();
return nullptr;
}
Act->Execute();
// Transfer any diagnostics generated when parsing the preamble into the set
// of preamble diagnostics.
for (stored_diag_iterator I = stored_diag_afterDriver_begin(),
E = stored_diag_end();
I != E; ++I)
PreambleDiagnostics.push_back(
makeStandaloneDiagnostic(Clang->getLangOpts(), *I));
Act->EndSourceFile();
checkAndRemoveNonDriverDiags(StoredDiagnostics);
if (!Act->hasEmittedPreamblePCH()) {
// The preamble PCH failed (e.g. there was a module loading fatal error),
// so no precompiled header was generated. Forget that we even tried.
// FIXME: Should we leave a note for ourselves to try again?
llvm::sys::fs::remove(FrontendOpts.OutputFile);
Preamble.clear();
TopLevelDeclsInPreamble.clear();
PreambleRebuildCounter = DefaultPreambleRebuildInterval;
PreprocessorOpts.RemappedFileBuffers.pop_back();
return nullptr;
}
// Keep track of the preamble we precompiled.
setPreambleFile(this, FrontendOpts.OutputFile);
NumWarningsInPreamble = getDiagnostics().getNumWarnings();
// Keep track of all of the files that the source manager knows about,
// so we can verify whether they have changed or not.
FilesInPreamble.clear();
SourceManager &SourceMgr = Clang->getSourceManager();
for (auto &Filename : PreambleDepCollector->getDependencies()) {
const FileEntry *File = Clang->getFileManager().getFile(Filename);
if (!File || File == SourceMgr.getFileEntryForID(SourceMgr.getMainFileID()))
continue;
if (time_t ModTime = File->getModificationTime()) {
FilesInPreamble[File->getName()] = PreambleFileHash::createForFile(
File->getSize(), ModTime);
} else {
llvm::MemoryBuffer *Buffer = SourceMgr.getMemoryBufferForFile(File);
FilesInPreamble[File->getName()] =
PreambleFileHash::createForMemoryBuffer(Buffer);
}
}
PreambleRebuildCounter = 1;
PreprocessorOpts.RemappedFileBuffers.pop_back();
// If the hash of top-level entities differs from the hash of the top-level
// entities the last time we rebuilt the preamble, clear out the completion
// cache.
if (CurrentTopLevelHashValue != PreambleTopLevelHashValue) {
CompletionCacheTopLevelHashValue = 0;
PreambleTopLevelHashValue = CurrentTopLevelHashValue;
}
return llvm::MemoryBuffer::getMemBufferCopy(NewPreamble.Buffer->getBuffer(),
MainFilename);
#endif // HLSL Change Ends - no support for PCH
}
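/// \brief Resolve the declaration IDs recorded from the precompiled preamble
/// into actual declarations, deserializing them as needed, and splice them in
/// ahead of the top-level declarations parsed from the main file.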
void ASTUnit::RealizeTopLevelDeclsFromPreamble() {
std::vector<Decl *> Resolved;
Resolved.reserve(TopLevelDeclsInPreamble.size());
ExternalASTSource &Source = *getASTContext().getExternalSource();
for (serialization::DeclID TopLevelDecl : TopLevelDeclsInPreamble) {
// Resolve the declaration ID to an actual declaration, possibly
// deserializing the declaration in the process.
if (Decl *D = Source.GetExternalDecl(TopLevelDecl))
Resolved.push_back(D);
}
TopLevelDeclsInPreamble.clear();
TopLevelDecls.insert(TopLevelDecls.begin(), Resolved.begin(), Resolved.end());
}
void ASTUnit::transferASTDataFromCompilerInstance(CompilerInstance &CI) {
// Steal the created target, context, and preprocessor if they have been
// created.
assert(CI.hasInvocation() && "missing invocation");
LangOpts = CI.getInvocation().LangOpts;
TheSema = CI.takeSema();
Consumer = CI.takeASTConsumer();
if (CI.hasASTContext())
Ctx = &CI.getASTContext();
if (CI.hasPreprocessor())
PP = &CI.getPreprocessor();
CI.setSourceManager(nullptr);
CI.setFileManager(nullptr);
if (CI.hasTarget())
Target = &CI.getTarget();
// Reader = CI.getModuleManager(); // HLSL Change - no support for AST serialization
HadModuleLoaderFatalFailure = CI.hadModuleLoaderFatalFailure();
}
StringRef ASTUnit::getMainFileName() const {
if (Invocation && !Invocation->getFrontendOpts().Inputs.empty()) {
const FrontendInputFile &Input = Invocation->getFrontendOpts().Inputs[0];
if (Input.isFile())
return Input.getFile();
else
return Input.getBuffer()->getBufferIdentifier();
}
if (SourceMgr) {
if (const FileEntry *
FE = SourceMgr->getFileEntryForID(SourceMgr->getMainFileID()))
return FE->getName();
}
return StringRef();
}
StringRef ASTUnit::getASTFileName() const {
#if 1 // HLSL Change Starts - no support for modules
return StringRef();
#else
if (!isMainFileAST())
return StringRef();
serialization::ModuleFile &
Mod = Reader->getModuleManager().getPrimaryModule();
return Mod.FileName;
#endif // HLSL Change Ends - no support for modules
}
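/// \brief Create an empty ASTUnit wired up with the given invocation's
/// diagnostics, file-system options, and file/source managers, ready to be
/// parsed or loaded later.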
ASTUnit *ASTUnit::create(CompilerInvocation *CI,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
bool CaptureDiagnostics,
bool UserFilesAreVolatile) {
std::unique_ptr<ASTUnit> AST;
AST.reset(new ASTUnit(false));
ConfigureDiags(Diags, *AST, CaptureDiagnostics);
AST->Diagnostics = Diags;
AST->Invocation = CI;
AST->FileSystemOpts = CI->getFileSystemOpts();
IntrusiveRefCntPtr<vfs::FileSystem> VFS =
createVFSFromCompilerInvocation(*CI, *Diags);
if (!VFS)
return nullptr;
AST->FileMgr = new FileManager(AST->FileSystemOpts, VFS);
AST->UserFilesAreVolatile = UserFilesAreVolatile;
AST->SourceMgr = new SourceManager(AST->getDiagnostics(), *AST->FileMgr,
UserFilesAreVolatile);
return AST.release();
}
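/// \brief Parse a translation unit with the given frontend action, or with a
/// top-level-decl tracking action if none is provided, and return the
/// resulting ASTUnit (or null on failure, optionally handing the failed unit
/// back through \p ErrAST).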
ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(
CompilerInvocation *CI,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags, ASTFrontendAction *Action,
ASTUnit *Unit, bool Persistent, StringRef ResourceFilesPath,
bool OnlyLocalDecls, bool CaptureDiagnostics, bool PrecompilePreamble,
bool CacheCodeCompletionResults, bool IncludeBriefCommentsInCodeCompletion,
bool UserFilesAreVolatile, std::unique_ptr<ASTUnit> *ErrAST) {
assert(CI && "A CompilerInvocation is required");
std::unique_ptr<ASTUnit> OwnAST;
ASTUnit *AST = Unit;
if (!AST) {
// Create the AST unit.
OwnAST.reset(create(CI, Diags, CaptureDiagnostics, UserFilesAreVolatile));
AST = OwnAST.get();
if (!AST)
return nullptr;
}
if (!ResourceFilesPath.empty()) {
// Override the resources path.
CI->getHeaderSearchOpts().ResourceDir = ResourceFilesPath;
}
AST->OnlyLocalDecls = OnlyLocalDecls;
AST->CaptureDiagnostics = CaptureDiagnostics;
if (PrecompilePreamble)
AST->PreambleRebuildCounter = 2;
AST->TUKind = Action ? Action->getTranslationUnitKind() : TU_Complete;
AST->ShouldCacheCodeCompletionResults = CacheCodeCompletionResults;
AST->IncludeBriefCommentsInCodeCompletion
= IncludeBriefCommentsInCodeCompletion;
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
ASTUnitCleanup(OwnAST.get());
llvm::CrashRecoveryContextCleanupRegistrar<DiagnosticsEngine,
llvm::CrashRecoveryContextReleaseRefCleanup<DiagnosticsEngine> >
DiagCleanup(Diags.get());
// We'll manage file buffers ourselves.
CI->getPreprocessorOpts().RetainRemappedFileBuffers = true;
CI->getFrontendOpts().DisableFree = false;
ProcessWarningOptions(AST->getDiagnostics(), CI->getDiagnosticOpts());
// Create the compiler instance to use for building the AST.
std::unique_ptr<CompilerInstance> Clang(
new CompilerInstance(PCHContainerOps));
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<CompilerInstance>
CICleanup(Clang.get());
Clang->setInvocation(CI);
AST->OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].getFile();
// Set up diagnostics, capturing any diagnostics that would
// otherwise be dropped.
Clang->setDiagnostics(&AST->getDiagnostics());
// Create the target instance.
Clang->setTarget(TargetInfo::CreateTargetInfo(
Clang->getDiagnostics(), Clang->getInvocation().TargetOpts));
if (!Clang->hasTarget())
return nullptr;
// Inform the target of the language options.
//
// FIXME: We shouldn't need to do this, the target should be immutable once
// created. This complexity should be lifted elsewhere.
Clang->getTarget().adjust(Clang->getLangOpts());
assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
"Invocation must have exactly one source file!");
assert(Clang->getFrontendOpts().Inputs[0].getKind() != IK_AST &&
"FIXME: AST inputs not yet supported here!");
assert(Clang->getFrontendOpts().Inputs[0].getKind() != IK_LLVM_IR &&
"IR inputs not supported here!");
// Configure the various subsystems.
AST->TheSema.reset();
AST->Ctx = nullptr;
AST->PP = nullptr;
AST->Reader = nullptr;
// Create a file manager object to provide access to and cache the filesystem.
Clang->setFileManager(&AST->getFileManager());
// Create the source manager.
Clang->setSourceManager(&AST->getSourceManager());
ASTFrontendAction *Act = Action;
std::unique_ptr<TopLevelDeclTrackerAction> TrackerAct;
if (!Act) {
TrackerAct.reset(new TopLevelDeclTrackerAction(*AST));
Act = TrackerAct.get();
}
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<TopLevelDeclTrackerAction>
ActCleanup(TrackerAct.get());
if (!Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0])) {
AST->transferASTDataFromCompilerInstance(*Clang);
if (OwnAST && ErrAST)
ErrAST->swap(OwnAST);
return nullptr;
}
if (Persistent && !TrackerAct) {
Clang->getPreprocessor().addPPCallbacks(
llvm::make_unique<MacroDefinitionTrackerPPCallbacks>(
AST->getCurrentTopLevelHashValue()));
std::vector<std::unique_ptr<ASTConsumer>> Consumers;
if (Clang->hasASTConsumer())
Consumers.push_back(Clang->takeASTConsumer());
Consumers.push_back(llvm::make_unique<TopLevelDeclTrackerConsumer>(
*AST, AST->getCurrentTopLevelHashValue()));
Clang->setASTConsumer(
llvm::make_unique<MultiplexConsumer>(std::move(Consumers)));
}
if (!Act->Execute()) {
AST->transferASTDataFromCompilerInstance(*Clang);
if (OwnAST && ErrAST)
ErrAST->swap(OwnAST);
return nullptr;
}
// Steal the created target, context, and preprocessor.
AST->transferASTDataFromCompilerInstance(*Clang);
Act->EndSourceFile();
if (OwnAST)
return OwnAST.release();
else
return AST;
}
bool ASTUnit::LoadFromCompilerInvocation(
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
bool PrecompilePreamble) {
if (!Invocation)
return true;
// We'll manage file buffers ourselves.
Invocation->getPreprocessorOpts().RetainRemappedFileBuffers = true;
Invocation->getFrontendOpts().DisableFree = false;
ProcessWarningOptions(getDiagnostics(), Invocation->getDiagnosticOpts());
std::unique_ptr<llvm::MemoryBuffer> OverrideMainBuffer;
if (PrecompilePreamble) {
PreambleRebuildCounter = 2;
OverrideMainBuffer =
getMainBufferWithPrecompiledPreamble(PCHContainerOps, *Invocation);
}
SimpleTimer ParsingTimer(WantTiming);
ParsingTimer.setOutput("Parsing " + getMainFileName());
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<llvm::MemoryBuffer>
MemBufferCleanup(OverrideMainBuffer.get());
return Parse(PCHContainerOps, std::move(OverrideMainBuffer));
}
std::unique_ptr<ASTUnit> ASTUnit::LoadFromCompilerInvocation(
CompilerInvocation *CI,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags, bool OnlyLocalDecls,
bool CaptureDiagnostics, bool PrecompilePreamble,
TranslationUnitKind TUKind, bool CacheCodeCompletionResults,
bool IncludeBriefCommentsInCodeCompletion, bool UserFilesAreVolatile) {
// Create the AST unit.
std::unique_ptr<ASTUnit> AST(new ASTUnit(false));
ConfigureDiags(Diags, *AST, CaptureDiagnostics);
AST->Diagnostics = Diags;
AST->OnlyLocalDecls = OnlyLocalDecls;
AST->CaptureDiagnostics = CaptureDiagnostics;
AST->TUKind = TUKind;
AST->ShouldCacheCodeCompletionResults = CacheCodeCompletionResults;
AST->IncludeBriefCommentsInCodeCompletion
= IncludeBriefCommentsInCodeCompletion;
AST->Invocation = CI;
AST->FileSystemOpts = CI->getFileSystemOpts();
IntrusiveRefCntPtr<vfs::FileSystem> VFS =
createVFSFromCompilerInvocation(*CI, *Diags);
if (!VFS)
return nullptr;
AST->FileMgr = new FileManager(AST->FileSystemOpts, VFS);
AST->UserFilesAreVolatile = UserFilesAreVolatile;
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
ASTUnitCleanup(AST.get());
llvm::CrashRecoveryContextCleanupRegistrar<DiagnosticsEngine,
llvm::CrashRecoveryContextReleaseRefCleanup<DiagnosticsEngine> >
DiagCleanup(Diags.get());
if (AST->LoadFromCompilerInvocation(PCHContainerOps, PrecompilePreamble))
return nullptr;
return AST;
}
ASTUnit *ASTUnit::LoadFromCommandLine(
const char **ArgBegin, const char **ArgEnd,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags, StringRef ResourceFilesPath,
bool OnlyLocalDecls, bool CaptureDiagnostics,
ArrayRef<RemappedFile> RemappedFiles, bool RemappedFilesKeepOriginalName,
bool PrecompilePreamble, TranslationUnitKind TUKind,
bool CacheCodeCompletionResults, bool IncludeBriefCommentsInCodeCompletion,
bool AllowPCHWithCompilerErrors, bool SkipFunctionBodies,
bool UserFilesAreVolatile, bool ForSerialization,
std::unique_ptr<ASTUnit> *ErrAST,
hlsl::DxcLangExtensionsHelperApply *HlslLangExtensions) { // HLSL Change
assert(Diags.get() && "no DiagnosticsEngine was provided");
SmallVector<StoredDiagnostic, 4> StoredDiagnostics;
IntrusiveRefCntPtr<CompilerInvocation> CI;
{
CaptureDroppedDiagnostics Capture(CaptureDiagnostics, *Diags,
StoredDiagnostics);
CI = clang::createInvocationFromCommandLine(
llvm::makeArrayRef(ArgBegin, ArgEnd),
Diags);
if (!CI)
return nullptr;
}
// HLSL Change Starts
// Add extensions.
// Override diagnostics to avoid reporting on extensions that may differ in other languages.
if (CI->getLangOpts()->HLSL) {
CI->getDiagnosticOpts().Pedantic = 0;
CI->getDiagnosticOpts().PedanticErrors = 0;
const char* groupNames[] = {
"c++98-compat", // CXX98Compat
"c++98-compat-pedantic", // CXX98CompatPedantic
"c++11-compat", // CXX11Compat
"c++-compat", // CXXCompat
"c++11-extensions", // CXX11
"c++1y-extensions", // CXX1y
"extern-c-compat", // ExternCCompat
"unicode" // Unicode
};
    // CXXPre1yCompat is included by CXX98Compat.
    // CXXPre1yCompatPedantic is included by CXX98CompatPedantic.
    // CXX11CompatReservedUserDefinedLiteral is included by CXX11Compat.
for (size_t i = 0; i < _countof(groupNames); ++i) {
if (Diags->setSeverityForGroup(diag::Flavor::WarningOrError,
StringRef(groupNames[i]),
diag::Severity::Ignored)) {
llvm_unreachable("there is a problem with diagnostic definitions.");
return nullptr;
}
}
Diags->setExtensionHandlingBehavior(diag::Severity::Ignored);
}
  // HLSL Change Ends
// Override any files that need remapping
for (const auto &RemappedFile : RemappedFiles) {
CI->getPreprocessorOpts().addRemappedFile(RemappedFile.first,
RemappedFile.second);
}
PreprocessorOptions &PPOpts = CI->getPreprocessorOpts();
PPOpts.RemappedFilesKeepOriginalName = RemappedFilesKeepOriginalName;
PPOpts.AllowPCHWithCompilerErrors = AllowPCHWithCompilerErrors;
// Override the resources path.
CI->getHeaderSearchOpts().ResourceDir = ResourceFilesPath;
CI->getFrontendOpts().SkipFunctionBodies = SkipFunctionBodies;
// Create the AST unit.
std::unique_ptr<ASTUnit> AST;
AST.reset(new ASTUnit(false));
// HLSL Change Starts
AST->HlslLangExtensions = HlslLangExtensions;
// Enable -verify and -verify-ignore-unexpected on the libclang initialization path.
bool VerifyDiagnostics = CI->getDiagnosticOpts().VerifyDiagnostics;
Diags->getDiagnosticOptions().setVerifyIgnoreUnexpected(
CI->getDiagnosticOpts().getVerifyIgnoreUnexpected());
// HLSL Change Ends
ConfigureDiags(Diags, *AST, CaptureDiagnostics, VerifyDiagnostics); // HLSL Change
AST->Diagnostics = Diags;
AST->FileSystemOpts = CI->getFileSystemOpts();
IntrusiveRefCntPtr<vfs::FileSystem> VFS =
createVFSFromCompilerInvocation(*CI, *Diags);
if (!VFS)
return nullptr;
AST->FileMgr = new FileManager(AST->FileSystemOpts, VFS);
AST->OnlyLocalDecls = OnlyLocalDecls;
AST->CaptureDiagnostics = CaptureDiagnostics;
AST->TUKind = TUKind;
AST->ShouldCacheCodeCompletionResults = CacheCodeCompletionResults;
AST->IncludeBriefCommentsInCodeCompletion
= IncludeBriefCommentsInCodeCompletion;
AST->UserFilesAreVolatile = UserFilesAreVolatile;
AST->NumStoredDiagnosticsFromDriver = StoredDiagnostics.size();
AST->StoredDiagnostics.swap(StoredDiagnostics);
AST->Invocation = CI;
#if 0 // HLSL Change - no support for serialization
if (ForSerialization)
AST->WriterData.reset(new ASTWriterData());
#endif // HLSL Change - no support for serialization
// Zero out now to ease cleanup during crash recovery.
CI = nullptr;
Diags = nullptr;
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
ASTUnitCleanup(AST.get());
if (AST->LoadFromCompilerInvocation(PCHContainerOps, PrecompilePreamble)) {
// Some error occurred, if caller wants to examine diagnostics, pass it the
// ASTUnit.
if (ErrAST) {
AST->StoredDiagnostics.swap(AST->FailedParseDiagnostics);
ErrAST->swap(AST);
}
return nullptr;
}
return AST.release();
}
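/// \brief Reparse the main file after remapping the given files, reusing the
/// precompiled preamble when one is available or can be rebuilt.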
bool ASTUnit::Reparse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
ArrayRef<RemappedFile> RemappedFiles) {
if (!Invocation)
return true;
clearFileLevelDecls();
SimpleTimer ParsingTimer(WantTiming);
ParsingTimer.setOutput("Reparsing " + getMainFileName());
// Remap files.
PreprocessorOptions &PPOpts = Invocation->getPreprocessorOpts();
for (const auto &RB : PPOpts.RemappedFileBuffers)
delete RB.second;
Invocation->getPreprocessorOpts().clearRemappedFiles();
for (const auto &RemappedFile : RemappedFiles) {
Invocation->getPreprocessorOpts().addRemappedFile(RemappedFile.first,
RemappedFile.second);
}
// If we have a preamble file lying around, or if we might try to
// build a precompiled preamble, do so now.
std::unique_ptr<llvm::MemoryBuffer> OverrideMainBuffer;
if (!getPreambleFile(this).empty() || PreambleRebuildCounter > 0)
OverrideMainBuffer =
getMainBufferWithPrecompiledPreamble(PCHContainerOps, *Invocation);
// Clear out the diagnostics state.
getDiagnostics().Reset();
ProcessWarningOptions(getDiagnostics(), Invocation->getDiagnosticOpts());
if (OverrideMainBuffer)
getDiagnostics().setNumWarnings(NumWarningsInPreamble);
// Parse the sources
bool Result = Parse(PCHContainerOps, std::move(OverrideMainBuffer));
// If we're caching global code-completion results, and the top-level
// declarations have changed, clear out the code-completion cache.
if (!Result && ShouldCacheCodeCompletionResults &&
CurrentTopLevelHashValue != CompletionCacheTopLevelHashValue)
CacheCodeCompletionResults();
// We now need to clear out the completion info related to this translation
// unit; it'll be recreated if necessary.
CCTUInfo.reset();
return Result;
}
//===----------------------------------------------------------------------===//
// Code completion
//===----------------------------------------------------------------------===//
namespace {
/// \brief Code completion consumer that combines the cached code-completion
/// results from an ASTUnit with the code-completion results provided to it,
/// then passes the combined result on to the next consumer.
class AugmentedCodeCompleteConsumer : public CodeCompleteConsumer {
uint64_t NormalContexts;
ASTUnit &AST;
CodeCompleteConsumer &Next;
public:
AugmentedCodeCompleteConsumer(ASTUnit &AST, CodeCompleteConsumer &Next,
const CodeCompleteOptions &CodeCompleteOpts)
: CodeCompleteConsumer(CodeCompleteOpts, Next.isOutputBinary()),
AST(AST), Next(Next)
{
// Compute the set of contexts in which we will look when we don't have
// any information about the specific context.
NormalContexts
= (1LL << CodeCompletionContext::CCC_TopLevel)
| (1LL << CodeCompletionContext::CCC_ObjCInterface)
| (1LL << CodeCompletionContext::CCC_ObjCImplementation)
| (1LL << CodeCompletionContext::CCC_ObjCIvarList)
| (1LL << CodeCompletionContext::CCC_Statement)
| (1LL << CodeCompletionContext::CCC_Expression)
| (1LL << CodeCompletionContext::CCC_ObjCMessageReceiver)
| (1LL << CodeCompletionContext::CCC_DotMemberAccess)
| (1LL << CodeCompletionContext::CCC_ArrowMemberAccess)
| (1LL << CodeCompletionContext::CCC_ObjCPropertyAccess)
| (1LL << CodeCompletionContext::CCC_ObjCProtocolName)
| (1LL << CodeCompletionContext::CCC_ParenthesizedExpression)
| (1LL << CodeCompletionContext::CCC_Recovery);
if (AST.getASTContext().getLangOpts().CPlusPlus)
NormalContexts |= (1LL << CodeCompletionContext::CCC_EnumTag)
| (1LL << CodeCompletionContext::CCC_UnionTag)
| (1LL << CodeCompletionContext::CCC_ClassOrStructTag);
}
void ProcessCodeCompleteResults(Sema &S, CodeCompletionContext Context,
CodeCompletionResult *Results,
unsigned NumResults) override;
void ProcessOverloadCandidates(Sema &S, unsigned CurrentArg,
OverloadCandidate *Candidates,
unsigned NumCandidates) override {
Next.ProcessOverloadCandidates(S, CurrentArg, Candidates, NumCandidates);
}
CodeCompletionAllocator &getAllocator() override {
return Next.getAllocator();
}
CodeCompletionTUInfo &getCodeCompletionTUInfo() override {
return Next.getCodeCompletionTUInfo();
}
};
}
/// \brief Helper function that computes which global names are hidden by the
/// local code-completion results.
static void CalculateHiddenNames(const CodeCompletionContext &Context,
CodeCompletionResult *Results,
unsigned NumResults,
ASTContext &Ctx,
llvm::StringSet<llvm::BumpPtrAllocator> &HiddenNames){
bool OnlyTagNames = false;
switch (Context.getKind()) {
case CodeCompletionContext::CCC_Recovery:
case CodeCompletionContext::CCC_TopLevel:
case CodeCompletionContext::CCC_ObjCInterface:
case CodeCompletionContext::CCC_ObjCImplementation:
case CodeCompletionContext::CCC_ObjCIvarList:
case CodeCompletionContext::CCC_ClassStructUnion:
case CodeCompletionContext::CCC_Statement:
case CodeCompletionContext::CCC_Expression:
case CodeCompletionContext::CCC_ObjCMessageReceiver:
case CodeCompletionContext::CCC_DotMemberAccess:
case CodeCompletionContext::CCC_ArrowMemberAccess:
case CodeCompletionContext::CCC_ObjCPropertyAccess:
case CodeCompletionContext::CCC_Namespace:
case CodeCompletionContext::CCC_Type:
case CodeCompletionContext::CCC_Name:
case CodeCompletionContext::CCC_PotentiallyQualifiedName:
case CodeCompletionContext::CCC_ParenthesizedExpression:
case CodeCompletionContext::CCC_ObjCInterfaceName:
break;
case CodeCompletionContext::CCC_EnumTag:
case CodeCompletionContext::CCC_UnionTag:
case CodeCompletionContext::CCC_ClassOrStructTag:
OnlyTagNames = true;
break;
case CodeCompletionContext::CCC_ObjCProtocolName:
case CodeCompletionContext::CCC_MacroName:
case CodeCompletionContext::CCC_MacroNameUse:
case CodeCompletionContext::CCC_PreprocessorExpression:
case CodeCompletionContext::CCC_PreprocessorDirective:
case CodeCompletionContext::CCC_NaturalLanguage:
case CodeCompletionContext::CCC_SelectorName:
case CodeCompletionContext::CCC_TypeQualifiers:
case CodeCompletionContext::CCC_Other:
case CodeCompletionContext::CCC_OtherWithMacros:
case CodeCompletionContext::CCC_ObjCInstanceMessage:
case CodeCompletionContext::CCC_ObjCClassMessage:
case CodeCompletionContext::CCC_ObjCCategoryName:
// We're looking for nothing, or we're looking for names that cannot
// be hidden.
return;
}
typedef CodeCompletionResult Result;
for (unsigned I = 0; I != NumResults; ++I) {
if (Results[I].Kind != Result::RK_Declaration)
continue;
unsigned IDNS
= Results[I].Declaration->getUnderlyingDecl()->getIdentifierNamespace();
bool Hiding = false;
if (OnlyTagNames)
Hiding = (IDNS & Decl::IDNS_Tag);
else {
unsigned HiddenIDNS = (Decl::IDNS_Type | Decl::IDNS_Member |
Decl::IDNS_Namespace | Decl::IDNS_Ordinary |
Decl::IDNS_NonMemberOperator);
if (Ctx.getLangOpts().CPlusPlus)
HiddenIDNS |= Decl::IDNS_Tag;
Hiding = (IDNS & HiddenIDNS);
}
if (!Hiding)
continue;
DeclarationName Name = Results[I].Declaration->getDeclName();
if (IdentifierInfo *Identifier = Name.getAsIdentifierInfo())
HiddenNames.insert(Identifier->getName());
else
HiddenNames.insert(Name.getAsString());
}
}
void AugmentedCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &S,
CodeCompletionContext Context,
CodeCompletionResult *Results,
unsigned NumResults) {
// Merge the results we were given with the results we cached.
bool AddedResult = false;
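  // In a recovery context we do not know the specific completion context, so
  // consult the full set of "normal" contexts; otherwise consult only the
  // single context we were given.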
uint64_t InContexts =
Context.getKind() == CodeCompletionContext::CCC_Recovery
? NormalContexts : (1LL << Context.getKind());
// Contains the set of names that are hidden by "local" completion results.
llvm::StringSet<llvm::BumpPtrAllocator> HiddenNames;
typedef CodeCompletionResult Result;
SmallVector<Result, 8> AllResults;
for (ASTUnit::cached_completion_iterator
C = AST.cached_completion_begin(),
CEnd = AST.cached_completion_end();
C != CEnd; ++C) {
// If the context we are in matches any of the contexts we are
// interested in, we'll add this result.
if ((C->ShowInContexts & InContexts) == 0)
continue;
// If we haven't added any results previously, do so now.
if (!AddedResult) {
CalculateHiddenNames(Context, Results, NumResults, S.Context,
HiddenNames);
AllResults.insert(AllResults.end(), Results, Results + NumResults);
AddedResult = true;
}
// Determine whether this global completion result is hidden by a local
// completion result. If so, skip it.
if (C->Kind != CXCursor_MacroDefinition &&
HiddenNames.count(C->Completion->getTypedText()))
continue;
// Adjust priority based on similar type classes.
unsigned Priority = C->Priority;
CodeCompletionString *Completion = C->Completion;
if (!Context.getPreferredType().isNull()) {
if (C->Kind == CXCursor_MacroDefinition) {
Priority = getMacroUsagePriority(C->Completion->getTypedText(),
S.getLangOpts(),
Context.getPreferredType()->isAnyPointerType());
} else if (C->Type) {
CanQualType Expected
= S.Context.getCanonicalType(
Context.getPreferredType().getUnqualifiedType());
SimplifiedTypeClass ExpectedSTC = getSimplifiedTypeClass(Expected);
if (ExpectedSTC == C->TypeClass) {
// We know this type is similar; check for an exact match.
llvm::StringMap<unsigned> &CachedCompletionTypes
= AST.getCachedCompletionTypes();
llvm::StringMap<unsigned>::iterator Pos
= CachedCompletionTypes.find(QualType(Expected).getAsString());
if (Pos != CachedCompletionTypes.end() && Pos->second == C->Type)
Priority /= CCF_ExactTypeMatch;
else
Priority /= CCF_SimilarTypeMatch;
}
}
}
// Adjust the completion string, if required.
if (C->Kind == CXCursor_MacroDefinition &&
Context.getKind() == CodeCompletionContext::CCC_MacroNameUse) {
// Create a new code-completion string that just contains the
// macro name, without its arguments.
CodeCompletionBuilder Builder(getAllocator(), getCodeCompletionTUInfo(),
CCP_CodePattern, C->Availability);
Builder.AddTypedTextChunk(C->Completion->getTypedText());
Priority = CCP_CodePattern;
Completion = Builder.TakeString();
}
AllResults.push_back(Result(Completion, Priority, C->Kind,
C->Availability));
}
// If we did not add any cached completion results, just forward the
// results we were given to the next consumer.
if (!AddedResult) {
Next.ProcessCodeCompleteResults(S, Context, Results, NumResults);
return;
}
Next.ProcessCodeCompleteResults(S, Context, AllResults.data(),
AllResults.size());
}
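/// \brief Perform code completion at the given file/line/column, merging any
/// cached global completion results with the parser's local results before
/// forwarding them to \p Consumer.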
void ASTUnit::CodeComplete(
StringRef File, unsigned Line, unsigned Column,
ArrayRef<RemappedFile> RemappedFiles, bool IncludeMacros,
bool IncludeCodePatterns, bool IncludeBriefComments,
CodeCompleteConsumer &Consumer,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
DiagnosticsEngine &Diag, LangOptions &LangOpts, SourceManager &SourceMgr,
FileManager &FileMgr, SmallVectorImpl<StoredDiagnostic> &StoredDiagnostics,
SmallVectorImpl<const llvm::MemoryBuffer *> &OwnedBuffers) {
if (!Invocation)
return;
SimpleTimer CompletionTimer(WantTiming);
CompletionTimer.setOutput("Code completion @ " + File + ":" +
Twine(Line) + ":" + Twine(Column));
IntrusiveRefCntPtr<CompilerInvocation>
CCInvocation(new CompilerInvocation(*Invocation));
FrontendOptions &FrontendOpts = CCInvocation->getFrontendOpts();
CodeCompleteOptions &CodeCompleteOpts = FrontendOpts.CodeCompleteOpts;
PreprocessorOptions &PreprocessorOpts = CCInvocation->getPreprocessorOpts();
CodeCompleteOpts.IncludeMacros = IncludeMacros &&
CachedCompletionResults.empty();
CodeCompleteOpts.IncludeCodePatterns = IncludeCodePatterns;
CodeCompleteOpts.IncludeGlobals = CachedCompletionResults.empty();
CodeCompleteOpts.IncludeBriefComments = IncludeBriefComments;
assert(IncludeBriefComments == this->IncludeBriefCommentsInCodeCompletion);
FrontendOpts.CodeCompletionAt.FileName = File;
FrontendOpts.CodeCompletionAt.Line = Line;
FrontendOpts.CodeCompletionAt.Column = Column;
// Set the language options appropriately.
LangOpts = *CCInvocation->getLangOpts();
// Spell-checking and warnings are wasteful during code-completion.
#ifdef MS_SUPPORT_VARIABLE_LANGOPTS // HLSL Change
LangOpts.SpellChecking = false;
#endif
CCInvocation->getDiagnosticOpts().IgnoreWarnings = true;
std::unique_ptr<CompilerInstance> Clang(
new CompilerInstance(PCHContainerOps));
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<CompilerInstance>
CICleanup(Clang.get());
Clang->setInvocation(&*CCInvocation);
OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].getFile();
// Set up diagnostics, capturing any diagnostics produced.
Clang->setDiagnostics(&Diag);
CaptureDroppedDiagnostics Capture(true,
Clang->getDiagnostics(),
StoredDiagnostics);
ProcessWarningOptions(Diag, CCInvocation->getDiagnosticOpts());
// Create the target instance.
Clang->setTarget(TargetInfo::CreateTargetInfo(
Clang->getDiagnostics(), Clang->getInvocation().TargetOpts));
if (!Clang->hasTarget()) {
Clang->setInvocation(nullptr);
return;
}
// Inform the target of the language options.
//
// FIXME: We shouldn't need to do this, the target should be immutable once
// created. This complexity should be lifted elsewhere.
Clang->getTarget().adjust(Clang->getLangOpts());
assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
"Invocation must have exactly one source file!");
assert(Clang->getFrontendOpts().Inputs[0].getKind() != IK_AST &&
"FIXME: AST inputs not yet supported here!");
assert(Clang->getFrontendOpts().Inputs[0].getKind() != IK_LLVM_IR &&
"IR inputs not support here!");
// Use the source and file managers that we were given.
Clang->setFileManager(&FileMgr);
Clang->setSourceManager(&SourceMgr);
// Remap files.
PreprocessorOpts.clearRemappedFiles();
PreprocessorOpts.RetainRemappedFileBuffers = true;
for (const auto &RemappedFile : RemappedFiles) {
PreprocessorOpts.addRemappedFile(RemappedFile.first, RemappedFile.second);
OwnedBuffers.push_back(RemappedFile.second);
}
// Use the code completion consumer we were given, but adding any cached
// code-completion results.
AugmentedCodeCompleteConsumer *AugmentedConsumer
= new AugmentedCodeCompleteConsumer(*this, Consumer, CodeCompleteOpts);
Clang->setCodeCompletionConsumer(AugmentedConsumer);
  // If we have a precompiled preamble, try to use it. We only allow
  // the use of the precompiled preamble if the completion point is
  // within the main file, after the end of the precompiled preamble.
std::unique_ptr<llvm::MemoryBuffer> OverrideMainBuffer;
if (!getPreambleFile(this).empty()) {
std::string CompleteFilePath(File);
llvm::sys::fs::UniqueID CompleteFileID;
if (!llvm::sys::fs::getUniqueID(CompleteFilePath, CompleteFileID)) {
std::string MainPath(OriginalSourceFile);
llvm::sys::fs::UniqueID MainID;
if (!llvm::sys::fs::getUniqueID(MainPath, MainID)) {
if (CompleteFileID == MainID && Line > 1)
OverrideMainBuffer = getMainBufferWithPrecompiledPreamble(
PCHContainerOps, *CCInvocation, false, Line - 1);
}
}
}
// If the main file has been overridden due to the use of a preamble,
// make that override happen and introduce the preamble.
if (OverrideMainBuffer) {
PreprocessorOpts.addRemappedFile(OriginalSourceFile,
OverrideMainBuffer.get());
PreprocessorOpts.PrecompiledPreambleBytes.first = Preamble.size();
PreprocessorOpts.PrecompiledPreambleBytes.second
= PreambleEndsAtStartOfLine;
PreprocessorOpts.ImplicitPCHInclude = getPreambleFile(this);
PreprocessorOpts.DisablePCHValidation = true;
OwnedBuffers.push_back(OverrideMainBuffer.release());
} else {
PreprocessorOpts.PrecompiledPreambleBytes.first = 0;
PreprocessorOpts.PrecompiledPreambleBytes.second = false;
}
// Disable the preprocessing record if modules are not enabled.
if (!Clang->getLangOpts().Modules)
PreprocessorOpts.DetailedRecord = false;
std::unique_ptr<SyntaxOnlyAction> Act;
Act.reset(new SyntaxOnlyAction);
if (Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0])) {
Act->Execute();
Act->EndSourceFile();
}
}
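/// \brief Serialize this translation unit to \p File, writing to a uniquely
/// named temporary file first and renaming it into place to avoid races.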
bool ASTUnit::Save(StringRef File) {
if (HadModuleLoaderFatalFailure)
return true;
// Write to a temporary file and later rename it to the actual file, to avoid
// possible race conditions.
SmallString<128> TempPath;
TempPath = File;
TempPath += "-%%%%%%%%";
int fd;
if (llvm::sys::fs::createUniqueFile(TempPath, fd, TempPath))
return true;
// FIXME: Can we somehow regenerate the stat cache here, or do we need to
// unconditionally create a stat cache when we parse the file?
llvm::raw_fd_ostream Out(fd, /*shouldClose=*/true);
serialize(Out);
Out.close();
if (Out.has_error()) {
Out.clear_error();
return true;
}
if (llvm::sys::fs::rename(TempPath, File)) {
llvm::sys::fs::remove(TempPath);
return true;
}
return false;
}
#if 1 // HLSL Change Starts - no support for serialization
bool ASTUnit::serialize(raw_ostream &) { return false; }
#else
static bool serializeUnit(ASTWriter &Writer,
SmallVectorImpl<char> &Buffer,
Sema &S,
bool hasErrors,
raw_ostream &OS) {
Writer.WriteAST(S, std::string(), nullptr, "", hasErrors);
// Write the generated bitstream to "Out".
if (!Buffer.empty())
OS.write(Buffer.data(), Buffer.size());
return false;
}
bool ASTUnit::serialize(raw_ostream &OS) {
bool hasErrors = getDiagnostics().hasErrorOccurred();
if (WriterData)
return serializeUnit(WriterData->Writer, WriterData->Buffer,
getSema(), hasErrors, OS);
SmallString<128> Buffer;
llvm::BitstreamWriter Stream(Buffer);
ASTWriter Writer(Stream);
return serializeUnit(Writer, Buffer, getSema(), hasErrors, OS);
}
#endif // HLSL Change Ends - no support for serialization
typedef ContinuousRangeMap<unsigned, int, 2> SLocRemap;
void ASTUnit::TranslateStoredDiagnostics(
FileManager &FileMgr,
SourceManager &SrcMgr,
const SmallVectorImpl<StandaloneDiagnostic> &Diags,
SmallVectorImpl<StoredDiagnostic> &Out) {
// Map the standalone diagnostic into the new source manager. We also need to
// remap all the locations to the new view. This includes the diag location,
// any associated source ranges, and the source ranges of associated fix-its.
// FIXME: There should be a cleaner way to do this.
SmallVector<StoredDiagnostic, 4> Result;
Result.reserve(Diags.size());
for (const StandaloneDiagnostic &SD : Diags) {
// Rebuild the StoredDiagnostic.
if (SD.Filename.empty())
continue;
const FileEntry *FE = FileMgr.getFile(SD.Filename);
if (!FE)
continue;
FileID FID = SrcMgr.translateFile(FE);
SourceLocation FileLoc = SrcMgr.getLocForStartOfFile(FID);
if (FileLoc.isInvalid())
continue;
SourceLocation L = FileLoc.getLocWithOffset(SD.LocOffset);
FullSourceLoc Loc(L, SrcMgr);
SmallVector<CharSourceRange, 4> Ranges;
Ranges.reserve(SD.Ranges.size());
for (const auto &Range : SD.Ranges) {
SourceLocation BL = FileLoc.getLocWithOffset(Range.first);
SourceLocation EL = FileLoc.getLocWithOffset(Range.second);
Ranges.push_back(CharSourceRange::getCharRange(BL, EL));
}
SmallVector<FixItHint, 2> FixIts;
FixIts.reserve(SD.FixIts.size());
for (const StandaloneFixIt &FixIt : SD.FixIts) {
FixIts.push_back(FixItHint());
FixItHint &FH = FixIts.back();
FH.CodeToInsert = FixIt.CodeToInsert;
SourceLocation BL = FileLoc.getLocWithOffset(FixIt.RemoveRange.first);
SourceLocation EL = FileLoc.getLocWithOffset(FixIt.RemoveRange.second);
FH.RemoveRange = CharSourceRange::getCharRange(BL, EL);
}
Result.push_back(StoredDiagnostic(SD.Level, SD.ID,
SD.Message, Loc, Ranges, FixIts));
}
Result.swap(Out);
}
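/// \brief Record a file-level declaration, keeping each file's declaration
/// list sorted by source offset so it can be binary-searched later.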
void ASTUnit::addFileLevelDecl(Decl *D) {
assert(D);
// We only care about local declarations.
if (D->isFromASTFile())
return;
SourceManager &SM = *SourceMgr;
SourceLocation Loc = D->getLocation();
if (Loc.isInvalid() || !SM.isLocalSourceLocation(Loc))
return;
// We only keep track of the file-level declarations of each file.
if (!D->getLexicalDeclContext()->isFileContext())
return;
SourceLocation FileLoc = SM.getFileLoc(Loc);
assert(SM.isLocalSourceLocation(FileLoc));
FileID FID;
unsigned Offset;
std::tie(FID, Offset) = SM.getDecomposedLoc(FileLoc);
if (FID.isInvalid())
return;
LocDeclsTy *&Decls = FileDecls[FID];
if (!Decls)
Decls = new LocDeclsTy();
std::pair<unsigned, Decl *> LocDecl(Offset, D);
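  // Decls usually arrive in source order, so appending keeps the vector
  // sorted by offset; otherwise fall back to a binary-search insertion.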
if (Decls->empty() || Decls->back().first <= Offset) {
Decls->push_back(LocDecl);
return;
}
LocDeclsTy::iterator I = std::upper_bound(Decls->begin(), Decls->end(),
LocDecl, llvm::less_first());
Decls->insert(I, LocDecl);
}
void ASTUnit::findFileRegionDecls(FileID File, unsigned Offset, unsigned Length,
SmallVectorImpl<Decl *> &Decls) {
if (File.isInvalid())
return;
if (SourceMgr->isLoadedFileID(File)) {
assert(Ctx->getExternalSource() && "No external source!");
return Ctx->getExternalSource()->FindFileRegionDecls(File, Offset, Length,
Decls);
}
FileDeclsTy::iterator I = FileDecls.find(File);
if (I == FileDecls.end())
return;
LocDeclsTy &LocDecls = *I->second;
if (LocDecls.empty())
return;
LocDeclsTy::iterator BeginIt =
std::lower_bound(LocDecls.begin(), LocDecls.end(),
std::make_pair(Offset, (Decl *)nullptr),
llvm::less_first());
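  // Step back one entry so a decl that starts before Offset but extends into
  // the region is still reported.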
if (BeginIt != LocDecls.begin())
--BeginIt;
// If we are pointing at a top-level decl inside an objc container, we need
  // to backtrack until we find it; otherwise we will fail to report that the
// region overlaps with an objc container.
while (BeginIt != LocDecls.begin() &&
BeginIt->second->isTopLevelDeclInObjCContainer())
--BeginIt;
LocDeclsTy::iterator EndIt = std::upper_bound(
LocDecls.begin(), LocDecls.end(),
std::make_pair(Offset + Length, (Decl *)nullptr), llvm::less_first());
if (EndIt != LocDecls.end())
++EndIt;
for (LocDeclsTy::iterator DIt = BeginIt; DIt != EndIt; ++DIt)
Decls.push_back(DIt->second);
}
SourceLocation ASTUnit::getLocation(const FileEntry *File,
unsigned Line, unsigned Col) const {
const SourceManager &SM = getSourceManager();
SourceLocation Loc = SM.translateFileLineCol(File, Line, Col);
return SM.getMacroArgExpandedLocation(Loc);
}
SourceLocation ASTUnit::getLocation(const FileEntry *File,
unsigned Offset) const {
const SourceManager &SM = getSourceManager();
SourceLocation FileLoc = SM.translateFileLineCol(File, 1, 1);
return SM.getMacroArgExpandedLocation(FileLoc.getLocWithOffset(Offset));
}
/// \brief If \arg Loc is a loaded location from the preamble, returns
/// the corresponding local location of the main file, otherwise it returns
/// \arg Loc.
SourceLocation ASTUnit::mapLocationFromPreamble(SourceLocation Loc) {
FileID PreambleID;
if (SourceMgr)
PreambleID = SourceMgr->getPreambleFileID();
if (Loc.isInvalid() || Preamble.empty() || PreambleID.isInvalid())
return Loc;
unsigned Offs;
if (SourceMgr->isInFileID(Loc, PreambleID, &Offs) && Offs < Preamble.size()) {
SourceLocation FileLoc
= SourceMgr->getLocForStartOfFile(SourceMgr->getMainFileID());
return FileLoc.getLocWithOffset(Offs);
}
return Loc;
}
/// \brief If \arg Loc is a local location of the main file but inside the
/// preamble chunk, returns the corresponding loaded location from the
/// preamble, otherwise it returns \arg Loc.
SourceLocation ASTUnit::mapLocationToPreamble(SourceLocation Loc) {
FileID PreambleID;
if (SourceMgr)
PreambleID = SourceMgr->getPreambleFileID();
if (Loc.isInvalid() || Preamble.empty() || PreambleID.isInvalid())
return Loc;
unsigned Offs;
if (SourceMgr->isInFileID(Loc, SourceMgr->getMainFileID(), &Offs) &&
Offs < Preamble.size()) {
SourceLocation FileLoc = SourceMgr->getLocForStartOfFile(PreambleID);
return FileLoc.getLocWithOffset(Offs);
}
return Loc;
}
bool ASTUnit::isInPreambleFileID(SourceLocation Loc) {
FileID FID;
if (SourceMgr)
FID = SourceMgr->getPreambleFileID();
if (Loc.isInvalid() || FID.isInvalid())
return false;
return SourceMgr->isInFileID(Loc, FID);
}
bool ASTUnit::isInMainFileID(SourceLocation Loc) {
FileID FID;
if (SourceMgr)
FID = SourceMgr->getMainFileID();
if (Loc.isInvalid() || FID.isInvalid())
return false;
return SourceMgr->isInFileID(Loc, FID);
}
SourceLocation ASTUnit::getEndOfPreambleFileID() {
FileID FID;
if (SourceMgr)
FID = SourceMgr->getPreambleFileID();
if (FID.isInvalid())
return SourceLocation();
return SourceMgr->getLocForEndOfFile(FID);
}
SourceLocation ASTUnit::getStartOfMainFileID() {
FileID FID;
if (SourceMgr)
FID = SourceMgr->getMainFileID();
if (FID.isInvalid())
return SourceLocation();
return SourceMgr->getLocForStartOfFile(FID);
}
llvm::iterator_range<PreprocessingRecord::iterator>
ASTUnit::getLocalPreprocessingEntities() const {
#if 0 // HLSL Change Starts - no support for modules
if (isMainFileAST()) {
serialization::ModuleFile &
Mod = Reader->getModuleManager().getPrimaryModule();
return Reader->getModulePreprocessedEntities(Mod);
}
#endif // HLSL Change Ends - no support for modules
if (PreprocessingRecord *PPRec = PP->getPreprocessingRecord())
return llvm::make_range(PPRec->local_begin(), PPRec->local_end());
return llvm::make_range(PreprocessingRecord::iterator(),
PreprocessingRecord::iterator());
}
bool ASTUnit::visitLocalTopLevelDecls(void *context, DeclVisitorFn Fn) {
#if 0 // HLSL Change Starts - no support for modules
if (isMainFileAST()) {
serialization::ModuleFile &
Mod = Reader->getModuleManager().getPrimaryModule();
for (const Decl *D : Reader->getModuleFileLevelDecls(Mod)) {
if (!Fn(context, D))
return false;
}
return true;
}
#endif // HLSL Change Ends - no support for modules
for (ASTUnit::top_level_iterator TL = top_level_begin(),
TLEnd = top_level_end();
TL != TLEnd; ++TL) {
if (!Fn(context, *TL))
return false;
}
return true;
}
namespace {
struct PCHLocatorInfo {
serialization::ModuleFile *Mod;
PCHLocatorInfo() : Mod(nullptr) {}
};
}
#if 0 // HLSL Change Starts - no support for PCH
static bool PCHLocator(serialization::ModuleFile &M, void *UserData) {
PCHLocatorInfo &Info = *static_cast<PCHLocatorInfo*>(UserData);
switch (M.Kind) {
case serialization::MK_ImplicitModule:
case serialization::MK_ExplicitModule:
return true; // skip dependencies.
case serialization::MK_PCH:
Info.Mod = &M;
return true; // found it.
case serialization::MK_Preamble:
return false; // look in dependencies.
case serialization::MK_MainFile:
return false; // look in dependencies.
}
return true;
}
#endif // HLSL Change Ends - no support for PCH
const FileEntry *ASTUnit::getPCHFile() {
#if 1 // HLSL Change Starts - no support for modules or PCH
return nullptr;
#else
if (!Reader)
return nullptr;
PCHLocatorInfo Info;
Reader->getModuleManager().visit(PCHLocator, &Info);
if (Info.Mod)
return Info.Mod->File;
return nullptr;
#endif // HLSL Change Ends - no support for modules or PCH
}
bool ASTUnit::isModuleFile() {
return false; // return isMainFileAST() && !ASTFileLangOpts.CurrentModule.empty(); // HLSL Change - no support for modules
}
void ASTUnit::PreambleData::countLines() const {
NumLines = 0;
if (empty())
return;
NumLines = std::count(Buffer.begin(), Buffer.end(), '\n');
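  // A final line without a trailing newline still counts as a line.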
if (Buffer.back() != '\n')
++NumLines;
}
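// ConcurrencyState is a debug-only reentrancy check: start() asserts that the
// mutex can be acquired immediately, which catches concurrent use of the same
// ASTUnit. In NDEBUG builds the checks compile away to no-ops.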
#ifndef NDEBUG
ASTUnit::ConcurrencyState::ConcurrencyState() {
Mutex = new llvm::sys::MutexImpl(/*recursive=*/true);
}
ASTUnit::ConcurrencyState::~ConcurrencyState() {
delete static_cast<llvm::sys::MutexImpl *>(Mutex);
}
void ASTUnit::ConcurrencyState::start() {
bool acquired = static_cast<llvm::sys::MutexImpl *>(Mutex)->tryacquire();
assert(acquired && "Concurrent access to ASTUnit!");
}
void ASTUnit::ConcurrencyState::finish() {
static_cast<llvm::sys::MutexImpl *>(Mutex)->release();
}
#else // NDEBUG
ASTUnit::ConcurrencyState::ConcurrencyState() { Mutex = nullptr; }
ASTUnit::ConcurrencyState::~ConcurrencyState() {}
void ASTUnit::ConcurrencyState::start() {}
void ASTUnit::ConcurrencyState::finish() {}
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/Frontend | repos/DirectXShaderCompiler/tools/clang/lib/Frontend/Rewrite/RewriteObjC.cpp | //===--- RewriteObjC.cpp - Playground for the code rewriter ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Hacks and fun related to the code rewriter.
//
//===----------------------------------------------------------------------===//
#include "clang/Rewrite/Frontend/ASTConsumers.h"
#include "clang/AST/AST.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/Attr.h"
#include "clang/AST/ParentMap.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
#include "clang/Rewrite/Core/Rewriter.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
#ifdef CLANG_ENABLE_OBJC_REWRITER
using namespace clang;
using llvm::utostr;
namespace {
class RewriteObjC : public ASTConsumer {
protected:
enum {
BLOCK_FIELD_IS_OBJECT = 3, /* id, NSObject, __attribute__((NSObject)),
block, ... */
BLOCK_FIELD_IS_BLOCK = 7, /* a block variable */
BLOCK_FIELD_IS_BYREF = 8, /* the on stack structure holding the
__block variable */
BLOCK_FIELD_IS_WEAK = 16, /* declared __weak, only used in byref copy
helpers */
BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose
support routines */
BLOCK_BYREF_CURRENT_MAX = 256
};
enum {
BLOCK_NEEDS_FREE = (1 << 24),
BLOCK_HAS_COPY_DISPOSE = (1 << 25),
BLOCK_HAS_CXX_OBJ = (1 << 26),
BLOCK_IS_GC = (1 << 27),
BLOCK_IS_GLOBAL = (1 << 28),
BLOCK_HAS_DESCRIPTOR = (1 << 29)
};
static const int OBJC_ABI_VERSION = 7;
Rewriter Rewrite;
DiagnosticsEngine &Diags;
const LangOptions &LangOpts;
ASTContext *Context;
SourceManager *SM;
TranslationUnitDecl *TUDecl;
FileID MainFileID;
const char *MainFileStart, *MainFileEnd;
Stmt *CurrentBody;
ParentMap *PropParentMap; // created lazily.
std::string InFileName;
raw_ostream* OutFile;
std::string Preamble;
TypeDecl *ProtocolTypeDecl;
VarDecl *GlobalVarDecl;
unsigned RewriteFailedDiag;
// ObjC string constant support.
unsigned NumObjCStringLiterals;
VarDecl *ConstantStringClassReference;
RecordDecl *NSStringRecord;
// ObjC foreach break/continue generation support.
int BcLabelCount;
unsigned TryFinallyContainsReturnDiag;
// Needed for super.
ObjCMethodDecl *CurMethodDef;
RecordDecl *SuperStructDecl;
RecordDecl *ConstantStringDecl;
FunctionDecl *MsgSendFunctionDecl;
FunctionDecl *MsgSendSuperFunctionDecl;
FunctionDecl *MsgSendStretFunctionDecl;
FunctionDecl *MsgSendSuperStretFunctionDecl;
FunctionDecl *MsgSendFpretFunctionDecl;
FunctionDecl *GetClassFunctionDecl;
FunctionDecl *GetMetaClassFunctionDecl;
FunctionDecl *GetSuperClassFunctionDecl;
FunctionDecl *SelGetUidFunctionDecl;
FunctionDecl *CFStringFunctionDecl;
FunctionDecl *SuperConstructorFunctionDecl;
FunctionDecl *CurFunctionDef;
FunctionDecl *CurFunctionDeclToDeclareForBlock;
/* Misc. containers needed for meta-data rewrite. */
SmallVector<ObjCImplementationDecl *, 8> ClassImplementation;
SmallVector<ObjCCategoryImplDecl *, 8> CategoryImplementation;
llvm::SmallPtrSet<ObjCInterfaceDecl*, 8> ObjCSynthesizedStructs;
llvm::SmallPtrSet<ObjCProtocolDecl*, 8> ObjCSynthesizedProtocols;
llvm::SmallPtrSet<ObjCInterfaceDecl*, 8> ObjCForwardDecls;
llvm::DenseMap<ObjCMethodDecl*, std::string> MethodInternalNames;
SmallVector<Stmt *, 32> Stmts;
SmallVector<int, 8> ObjCBcLabelNo;
// Remember all the @protocol(<expr>) expressions.
llvm::SmallPtrSet<ObjCProtocolDecl *, 32> ProtocolExprDecls;
llvm::DenseSet<uint64_t> CopyDestroyCache;
// Block expressions.
SmallVector<BlockExpr *, 32> Blocks;
SmallVector<int, 32> InnerDeclRefsCount;
SmallVector<DeclRefExpr *, 32> InnerDeclRefs;
SmallVector<DeclRefExpr *, 32> BlockDeclRefs;
// Block related declarations.
SmallVector<ValueDecl *, 8> BlockByCopyDecls;
llvm::SmallPtrSet<ValueDecl *, 8> BlockByCopyDeclsPtrSet;
SmallVector<ValueDecl *, 8> BlockByRefDecls;
llvm::SmallPtrSet<ValueDecl *, 8> BlockByRefDeclsPtrSet;
llvm::DenseMap<ValueDecl *, unsigned> BlockByRefDeclNo;
llvm::SmallPtrSet<ValueDecl *, 8> ImportedBlockDecls;
llvm::SmallPtrSet<VarDecl *, 8> ImportedLocalExternalDecls;
llvm::DenseMap<BlockExpr *, std::string> RewrittenBlockExprs;
  // This maps an original source AST to its rewritten form. This allows
// us to avoid rewriting the same node twice (which is very uncommon).
// This is needed to support some of the exotic property rewriting.
llvm::DenseMap<Stmt *, Stmt *> ReplacedNodes;
// Needed for header files being rewritten
bool IsHeader;
bool SilenceRewriteMacroWarning;
bool objc_impl_method;
bool DisableReplaceStmt;
class DisableReplaceStmtScope {
RewriteObjC &R;
bool SavedValue;
public:
DisableReplaceStmtScope(RewriteObjC &R)
: R(R), SavedValue(R.DisableReplaceStmt) {
R.DisableReplaceStmt = true;
}
~DisableReplaceStmtScope() {
R.DisableReplaceStmt = SavedValue;
}
};
void InitializeCommon(ASTContext &context);
public:
// Top Level Driver code.
bool HandleTopLevelDecl(DeclGroupRef D) override {
for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(*I)) {
if (!Class->isThisDeclarationADefinition()) {
RewriteForwardClassDecl(D);
break;
}
}
if (ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>(*I)) {
if (!Proto->isThisDeclarationADefinition()) {
RewriteForwardProtocolDecl(D);
break;
}
}
HandleTopLevelSingleDecl(*I);
}
return true;
}
void HandleTopLevelSingleDecl(Decl *D);
void HandleDeclInMainFile(Decl *D);
RewriteObjC(std::string inFile, raw_ostream *OS,
DiagnosticsEngine &D, const LangOptions &LOpts,
bool silenceMacroWarn);
~RewriteObjC() override {}
void HandleTranslationUnit(ASTContext &C) override;
void ReplaceStmt(Stmt *Old, Stmt *New) {
ReplaceStmtWithRange(Old, New, Old->getSourceRange());
}
void ReplaceStmtWithRange(Stmt *Old, Stmt *New, SourceRange SrcRange) {
assert(Old != nullptr && New != nullptr && "Expected non-null Stmt's");
Stmt *ReplacingStmt = ReplacedNodes[Old];
if (ReplacingStmt)
return; // We can't rewrite the same node twice.
if (DisableReplaceStmt)
return;
// Measure the old text.
int Size = Rewrite.getRangeSize(SrcRange);
if (Size == -1) {
Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
<< Old->getSourceRange();
return;
}
// Get the new text.
std::string SStr;
llvm::raw_string_ostream S(SStr);
New->printPretty(S, nullptr, PrintingPolicy(LangOpts));
const std::string &Str = S.str();
    // If the replacement succeeded (or the warning is disabled), return
    // without diagnosing.
if (!Rewrite.ReplaceText(SrcRange.getBegin(), Size, Str)) {
ReplacedNodes[Old] = New;
return;
}
if (SilenceRewriteMacroWarning)
return;
Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
<< Old->getSourceRange();
}
void InsertText(SourceLocation Loc, StringRef Str,
bool InsertAfter = true) {
    // If the insertion succeeded or the warning is disabled, return without
    // diagnosing.
if (!Rewrite.InsertText(Loc, Str, InsertAfter) ||
SilenceRewriteMacroWarning)
return;
Diags.Report(Context->getFullLoc(Loc), RewriteFailedDiag);
}
void ReplaceText(SourceLocation Start, unsigned OrigLength,
StringRef Str) {
    // If the replacement succeeded or the warning is disabled, return without
    // diagnosing.
if (!Rewrite.ReplaceText(Start, OrigLength, Str) ||
SilenceRewriteMacroWarning)
return;
Diags.Report(Context->getFullLoc(Start), RewriteFailedDiag);
}
// Syntactic Rewriting.
void RewriteRecordBody(RecordDecl *RD);
void RewriteInclude();
void RewriteForwardClassDecl(DeclGroupRef D);
void RewriteForwardClassDecl(const SmallVectorImpl<Decl *> &DG);
void RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl,
const std::string &typedefString);
void RewriteImplementations();
void RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
ObjCImplementationDecl *IMD,
ObjCCategoryImplDecl *CID);
void RewriteInterfaceDecl(ObjCInterfaceDecl *Dcl);
void RewriteImplementationDecl(Decl *Dcl);
void RewriteObjCMethodDecl(const ObjCInterfaceDecl *IDecl,
ObjCMethodDecl *MDecl, std::string &ResultStr);
void RewriteTypeIntoString(QualType T, std::string &ResultStr,
const FunctionType *&FPRetType);
void RewriteByRefString(std::string &ResultStr, const std::string &Name,
ValueDecl *VD, bool def=false);
void RewriteCategoryDecl(ObjCCategoryDecl *Dcl);
void RewriteProtocolDecl(ObjCProtocolDecl *Dcl);
void RewriteForwardProtocolDecl(DeclGroupRef D);
void RewriteForwardProtocolDecl(const SmallVectorImpl<Decl *> &DG);
void RewriteMethodDeclaration(ObjCMethodDecl *Method);
void RewriteProperty(ObjCPropertyDecl *prop);
void RewriteFunctionDecl(FunctionDecl *FD);
void RewriteBlockPointerType(std::string& Str, QualType Type);
void RewriteBlockPointerTypeVariable(std::string& Str, ValueDecl *VD);
void RewriteBlockLiteralFunctionDecl(FunctionDecl *FD);
void RewriteObjCQualifiedInterfaceTypes(Decl *Dcl);
void RewriteTypeOfDecl(VarDecl *VD);
void RewriteObjCQualifiedInterfaceTypes(Expr *E);
// Expression Rewriting.
Stmt *RewriteFunctionBodyOrGlobalInitializer(Stmt *S);
Stmt *RewriteAtEncode(ObjCEncodeExpr *Exp);
Stmt *RewritePropertyOrImplicitGetter(PseudoObjectExpr *Pseudo);
Stmt *RewritePropertyOrImplicitSetter(PseudoObjectExpr *Pseudo);
Stmt *RewriteAtSelector(ObjCSelectorExpr *Exp);
Stmt *RewriteMessageExpr(ObjCMessageExpr *Exp);
Stmt *RewriteObjCStringLiteral(ObjCStringLiteral *Exp);
Stmt *RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp);
void RewriteTryReturnStmts(Stmt *S);
void RewriteSyncReturnStmts(Stmt *S, std::string buf);
Stmt *RewriteObjCTryStmt(ObjCAtTryStmt *S);
Stmt *RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S);
Stmt *RewriteObjCThrowStmt(ObjCAtThrowStmt *S);
Stmt *RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
SourceLocation OrigEnd);
Stmt *RewriteBreakStmt(BreakStmt *S);
Stmt *RewriteContinueStmt(ContinueStmt *S);
void RewriteCastExpr(CStyleCastExpr *CE);
// Block rewriting.
void RewriteBlocksInFunctionProtoType(QualType funcType, NamedDecl *D);
// Block specific rewrite rules.
void RewriteBlockPointerDecl(NamedDecl *VD);
void RewriteByRefVar(VarDecl *VD);
Stmt *RewriteBlockDeclRefExpr(DeclRefExpr *VD);
Stmt *RewriteLocalVariableExternalStorage(DeclRefExpr *DRE);
void RewriteBlockPointerFunctionArgs(FunctionDecl *FD);
void RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl,
std::string &Result);
void Initialize(ASTContext &context) override = 0;
// Metadata Rewriting.
virtual void RewriteMetaDataIntoBuffer(std::string &Result) = 0;
virtual void RewriteObjCProtocolListMetaData(const ObjCList<ObjCProtocolDecl> &Prots,
StringRef prefix,
StringRef ClassName,
std::string &Result) = 0;
virtual void RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *CDecl,
std::string &Result) = 0;
virtual void RewriteObjCProtocolMetaData(ObjCProtocolDecl *Protocol,
StringRef prefix,
StringRef ClassName,
std::string &Result) = 0;
virtual void RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
std::string &Result) = 0;
// Rewriting ivar access
virtual Stmt *RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) = 0;
virtual void RewriteIvarOffsetComputation(ObjCIvarDecl *ivar,
std::string &Result) = 0;
// Misc. AST transformation routines. Sometimes they end up calling
// rewriting routines on the new ASTs.
CallExpr *SynthesizeCallToFunctionDecl(FunctionDecl *FD,
Expr **args, unsigned nargs,
SourceLocation StartLoc=SourceLocation(),
SourceLocation EndLoc=SourceLocation());
CallExpr *SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavor,
QualType msgSendType,
QualType returnType,
SmallVectorImpl<QualType> &ArgTypes,
SmallVectorImpl<Expr*> &MsgExprs,
ObjCMethodDecl *Method);
Stmt *SynthMessageExpr(ObjCMessageExpr *Exp,
SourceLocation StartLoc=SourceLocation(),
SourceLocation EndLoc=SourceLocation());
void SynthCountByEnumWithState(std::string &buf);
void SynthMsgSendFunctionDecl();
void SynthMsgSendSuperFunctionDecl();
void SynthMsgSendStretFunctionDecl();
void SynthMsgSendFpretFunctionDecl();
void SynthMsgSendSuperStretFunctionDecl();
void SynthGetClassFunctionDecl();
void SynthGetMetaClassFunctionDecl();
void SynthGetSuperClassFunctionDecl();
void SynthSelGetUidFunctionDecl();
void SynthSuperConstructorFunctionDecl();
std::string SynthesizeByrefCopyDestroyHelper(VarDecl *VD, int flag);
std::string SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
StringRef funcName, std::string Tag);
std::string SynthesizeBlockFunc(BlockExpr *CE, int i,
StringRef funcName, std::string Tag);
std::string SynthesizeBlockImpl(BlockExpr *CE,
std::string Tag, std::string Desc);
std::string SynthesizeBlockDescriptor(std::string DescTag,
std::string ImplTag,
int i, StringRef funcName,
unsigned hasCopy);
Stmt *SynthesizeBlockCall(CallExpr *Exp, const Expr* BlockExp);
void SynthesizeBlockLiterals(SourceLocation FunLocStart,
StringRef FunName);
FunctionDecl *SynthBlockInitFunctionDecl(StringRef name);
Stmt *SynthBlockInitExpr(BlockExpr *Exp,
const SmallVectorImpl<DeclRefExpr *> &InnerBlockDeclRefs);
// Misc. helper routines.
QualType getProtocolType();
void WarnAboutReturnGotoStmts(Stmt *S);
void HasReturnStmts(Stmt *S, bool &hasReturns);
void CheckFunctionPointerDecl(QualType dType, NamedDecl *ND);
void InsertBlockLiteralsWithinFunction(FunctionDecl *FD);
void InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD);
bool IsDeclStmtInForeachHeader(DeclStmt *DS);
void CollectBlockDeclRefInfo(BlockExpr *Exp);
void GetBlockDeclRefExprs(Stmt *S);
void GetInnerBlockDeclRefExprs(Stmt *S,
SmallVectorImpl<DeclRefExpr *> &InnerBlockDeclRefs,
llvm::SmallPtrSetImpl<const DeclContext *> &InnerContexts);
// We avoid calling Type::isBlockPointerType(), since it operates on the
// canonical type. We only care if the top-level type is a closure pointer.
bool isTopLevelBlockPointerType(QualType T) {
return isa<BlockPointerType>(T);
}
/// convertBlockPointerToFunctionPointer - Converts a block-pointer type
/// to a function pointer type and upon success, returns true; false
/// otherwise.
bool convertBlockPointerToFunctionPointer(QualType &T) {
if (isTopLevelBlockPointerType(T)) {
const BlockPointerType *BPT = T->getAs<BlockPointerType>();
T = Context->getPointerType(BPT->getPointeeType());
return true;
}
return false;
}
bool needToScanForQualifiers(QualType T);
QualType getSuperStructType();
QualType getConstantStringStructType();
QualType convertFunctionTypeOfBlocks(const FunctionType *FT);
bool BufferContainsPPDirectives(const char *startBuf, const char *endBuf);
void convertToUnqualifiedObjCType(QualType &T) {
if (T->isObjCQualifiedIdType())
T = Context->getObjCIdType();
else if (T->isObjCQualifiedClassType())
T = Context->getObjCClassType();
else if (T->isObjCObjectPointerType() &&
T->getPointeeType()->isObjCQualifiedInterfaceType()) {
if (const ObjCObjectPointerType * OBJPT =
T->getAsObjCInterfacePointerType()) {
const ObjCInterfaceType *IFaceT = OBJPT->getInterfaceType();
T = QualType(IFaceT, 0);
T = Context->getPointerType(T);
}
}
}
// FIXME: This predicate seems like it would be useful to add to ASTContext.
bool isObjCType(QualType T) {
if (!LangOpts.ObjC1 && !LangOpts.ObjC2)
return false;
QualType OCT = Context->getCanonicalType(T).getUnqualifiedType();
if (OCT == Context->getCanonicalType(Context->getObjCIdType()) ||
OCT == Context->getCanonicalType(Context->getObjCClassType()))
return true;
if (const PointerType *PT = OCT->getAs<PointerType>()) {
if (isa<ObjCInterfaceType>(PT->getPointeeType()) ||
PT->getPointeeType()->isObjCQualifiedIdType())
return true;
}
return false;
}
bool PointerTypeTakesAnyBlockArguments(QualType QT);
bool PointerTypeTakesAnyObjCQualifiedType(QualType QT);
void GetExtentOfArgList(const char *Name, const char *&LParen,
const char *&RParen);
void QuoteDoublequotes(std::string &From, std::string &To) {
for (unsigned i = 0; i < From.length(); i++) {
if (From[i] == '"')
To += "\\\"";
else
To += From[i];
}
}
QualType getSimpleFunctionType(QualType result,
ArrayRef<QualType> args,
bool variadic = false) {
if (result == Context->getObjCInstanceType())
result = Context->getObjCIdType();
FunctionProtoType::ExtProtoInfo fpi;
fpi.Variadic = variadic;
return Context->getFunctionType(result, args, fpi);
}
// Helper function: create a CStyleCastExpr with trivial type source info.
CStyleCastExpr* NoTypeInfoCStyleCastExpr(ASTContext *Ctx, QualType Ty,
CastKind Kind, Expr *E) {
TypeSourceInfo *TInfo = Ctx->getTrivialTypeSourceInfo(Ty, SourceLocation());
return CStyleCastExpr::Create(*Ctx, Ty, VK_RValue, Kind, E, nullptr,
TInfo, SourceLocation(), SourceLocation());
}
StringLiteral *getStringLiteral(StringRef Str) {
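    // The array length is Str.size() + 1 to include the implicit NUL
    // terminator in the literal's type.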
QualType StrType = Context->getConstantArrayType(
Context->CharTy, llvm::APInt(32, Str.size() + 1), ArrayType::Normal,
0);
return StringLiteral::Create(*Context, Str, StringLiteral::Ascii,
/*Pascal=*/false, StrType, SourceLocation());
}
};
class RewriteObjCFragileABI : public RewriteObjC {
public:
RewriteObjCFragileABI(std::string inFile, raw_ostream *OS,
DiagnosticsEngine &D, const LangOptions &LOpts,
bool silenceMacroWarn) : RewriteObjC(inFile, OS,
D, LOpts,
silenceMacroWarn) {}
~RewriteObjCFragileABI() override {}
void Initialize(ASTContext &context) override;
// Rewriting metadata
template<typename MethodIterator>
void RewriteObjCMethodsMetaData(MethodIterator MethodBegin,
MethodIterator MethodEnd,
bool IsInstanceMethod,
StringRef prefix,
StringRef ClassName,
std::string &Result);
void RewriteObjCProtocolMetaData(ObjCProtocolDecl *Protocol,
StringRef prefix, StringRef ClassName,
std::string &Result) override;
void RewriteObjCProtocolListMetaData(
const ObjCList<ObjCProtocolDecl> &Prots,
StringRef prefix, StringRef ClassName, std::string &Result) override;
void RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
std::string &Result) override;
void RewriteMetaDataIntoBuffer(std::string &Result) override;
void RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *CDecl,
std::string &Result) override;
// Rewriting ivar
void RewriteIvarOffsetComputation(ObjCIvarDecl *ivar,
std::string &Result) override;
Stmt *RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) override;
};
}
void RewriteObjC::RewriteBlocksInFunctionProtoType(QualType funcType,
NamedDecl *D) {
if (const FunctionProtoType *fproto
= dyn_cast<FunctionProtoType>(funcType.IgnoreParens())) {
for (const auto &I : fproto->param_types())
if (isTopLevelBlockPointerType(I)) {
// All the args are checked/rewritten. Don't call twice!
RewriteBlockPointerDecl(D);
break;
}
}
}
void RewriteObjC::CheckFunctionPointerDecl(QualType funcType, NamedDecl *ND) {
const PointerType *PT = funcType->getAs<PointerType>();
if (PT && PointerTypeTakesAnyBlockArguments(funcType))
RewriteBlocksInFunctionProtoType(PT->getPointeeType(), ND);
}
static bool IsHeaderFile(const std::string &Filename) {
std::string::size_type DotPos = Filename.rfind('.');
if (DotPos == std::string::npos) {
// no file extension
return false;
}
std::string Ext = std::string(Filename.begin()+DotPos+1, Filename.end());
// C header: .h
// C++ header: .hh or .H;
return Ext == "h" || Ext == "hh" || Ext == "H";
}
RewriteObjC::RewriteObjC(std::string inFile, raw_ostream* OS,
DiagnosticsEngine &D, const LangOptions &LOpts,
bool silenceMacroWarn)
: Diags(D), LangOpts(LOpts), InFileName(inFile), OutFile(OS),
SilenceRewriteMacroWarning(silenceMacroWarn) {
IsHeader = IsHeaderFile(inFile);
RewriteFailedDiag = Diags.getCustomDiagID(DiagnosticsEngine::Warning,
"rewriting sub-expression within a macro (may not be correct)");
TryFinallyContainsReturnDiag = Diags.getCustomDiagID(
DiagnosticsEngine::Warning,
"rewriter doesn't support user-specified control flow semantics "
"for @try/@finally (code may not execute properly)");
}
std::unique_ptr<ASTConsumer>
clang::CreateObjCRewriter(const std::string &InFile, raw_ostream *OS,
DiagnosticsEngine &Diags, const LangOptions &LOpts,
bool SilenceRewriteMacroWarning) {
return llvm::make_unique<RewriteObjCFragileABI>(InFile, OS, Diags, LOpts,
SilenceRewriteMacroWarning);
}
void RewriteObjC::InitializeCommon(ASTContext &context) {
Context = &context;
SM = &Context->getSourceManager();
TUDecl = Context->getTranslationUnitDecl();
MsgSendFunctionDecl = nullptr;
MsgSendSuperFunctionDecl = nullptr;
MsgSendStretFunctionDecl = nullptr;
MsgSendSuperStretFunctionDecl = nullptr;
MsgSendFpretFunctionDecl = nullptr;
GetClassFunctionDecl = nullptr;
GetMetaClassFunctionDecl = nullptr;
GetSuperClassFunctionDecl = nullptr;
SelGetUidFunctionDecl = nullptr;
CFStringFunctionDecl = nullptr;
ConstantStringClassReference = nullptr;
NSStringRecord = nullptr;
CurMethodDef = nullptr;
CurFunctionDef = nullptr;
CurFunctionDeclToDeclareForBlock = nullptr;
GlobalVarDecl = nullptr;
SuperStructDecl = nullptr;
ProtocolTypeDecl = nullptr;
ConstantStringDecl = nullptr;
BcLabelCount = 0;
SuperConstructorFunctionDecl = nullptr;
NumObjCStringLiterals = 0;
PropParentMap = nullptr;
CurrentBody = nullptr;
DisableReplaceStmt = false;
objc_impl_method = false;
// Get the ID and start/end of the main file.
MainFileID = SM->getMainFileID();
const llvm::MemoryBuffer *MainBuf = SM->getBuffer(MainFileID);
MainFileStart = MainBuf->getBufferStart();
MainFileEnd = MainBuf->getBufferEnd();
Rewrite.setSourceMgr(Context->getSourceManager(), Context->getLangOpts());
}
//===----------------------------------------------------------------------===//
// Top Level Driver Code
//===----------------------------------------------------------------------===//
void RewriteObjC::HandleTopLevelSingleDecl(Decl *D) {
if (Diags.hasErrorOccurred())
return;
// Two cases: either the decl could be in the main file, or it could be in a
  // #included file. If the former, rewrite it now. If the latter, check to see
// if we rewrote the #include/#import.
SourceLocation Loc = D->getLocation();
Loc = SM->getExpansionLoc(Loc);
// If this is for a builtin, ignore it.
if (Loc.isInvalid()) return;
  // Look for built-in declarations that we need to refer to during the rewrite.
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
RewriteFunctionDecl(FD);
} else if (VarDecl *FVD = dyn_cast<VarDecl>(D)) {
// declared in <Foundation/NSString.h>
if (FVD->getName() == "_NSConstantStringClassReference") {
ConstantStringClassReference = FVD;
return;
}
} else if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
if (ID->isThisDeclarationADefinition())
RewriteInterfaceDecl(ID);
} else if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(D)) {
RewriteCategoryDecl(CD);
} else if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D)) {
if (PD->isThisDeclarationADefinition())
RewriteProtocolDecl(PD);
} else if (LinkageSpecDecl *LSD = dyn_cast<LinkageSpecDecl>(D)) {
// Recurse into linkage specifications
for (DeclContext::decl_iterator DI = LSD->decls_begin(),
DIEnd = LSD->decls_end();
DI != DIEnd; ) {
if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>((*DI))) {
if (!IFace->isThisDeclarationADefinition()) {
SmallVector<Decl *, 8> DG;
SourceLocation StartLoc = IFace->getLocStart();
do {
if (isa<ObjCInterfaceDecl>(*DI) &&
!cast<ObjCInterfaceDecl>(*DI)->isThisDeclarationADefinition() &&
StartLoc == (*DI)->getLocStart())
DG.push_back(*DI);
else
break;
++DI;
} while (DI != DIEnd);
RewriteForwardClassDecl(DG);
continue;
}
}
if (ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>((*DI))) {
if (!Proto->isThisDeclarationADefinition()) {
SmallVector<Decl *, 8> DG;
SourceLocation StartLoc = Proto->getLocStart();
do {
if (isa<ObjCProtocolDecl>(*DI) &&
!cast<ObjCProtocolDecl>(*DI)->isThisDeclarationADefinition() &&
StartLoc == (*DI)->getLocStart())
DG.push_back(*DI);
else
break;
++DI;
} while (DI != DIEnd);
RewriteForwardProtocolDecl(DG);
continue;
}
}
HandleTopLevelSingleDecl(*DI);
++DI;
}
}
// If we have a decl in the main file, see if we should rewrite it.
if (SM->isWrittenInMainFile(Loc))
return HandleDeclInMainFile(D);
}
//===----------------------------------------------------------------------===//
// Syntactic (non-AST) Rewriting Code
//===----------------------------------------------------------------------===//
void RewriteObjC::RewriteInclude() {
SourceLocation LocStart = SM->getLocForStartOfFile(MainFileID);
StringRef MainBuf = SM->getBufferData(MainFileID);
const char *MainBufStart = MainBuf.begin();
const char *MainBufEnd = MainBuf.end();
size_t ImportLen = strlen("import");
// Loop over the whole file, looking for includes.
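  // For example, '#import <Foundation/Foundation.h>' is rewritten in place to
  // '#include <Foundation/Foundation.h>'.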
for (const char *BufPtr = MainBufStart; BufPtr < MainBufEnd; ++BufPtr) {
if (*BufPtr == '#') {
if (++BufPtr == MainBufEnd)
return;
while (*BufPtr == ' ' || *BufPtr == '\t')
if (++BufPtr == MainBufEnd)
return;
if (!strncmp(BufPtr, "import", ImportLen)) {
// replace import with include
SourceLocation ImportLoc =
LocStart.getLocWithOffset(BufPtr-MainBufStart);
ReplaceText(ImportLoc, ImportLen, "include");
BufPtr += ImportLen;
}
}
}
}
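// Builds a direct ivar access through the synthesized implementation struct;
// e.g. for an ivar 'bar' of class 'Foo' this produces
// "((struct Foo_IMPL *)self)->bar".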
static std::string getIvarAccessString(ObjCIvarDecl *OID) {
const ObjCInterfaceDecl *ClassDecl = OID->getContainingInterface();
std::string S;
S = "((struct ";
S += ClassDecl->getIdentifier()->getName();
S += "_IMPL *)self)->";
S += OID->getName();
return S;
}
void RewriteObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
ObjCImplementationDecl *IMD,
ObjCCategoryImplDecl *CID) {
static bool objcGetPropertyDefined = false;
static bool objcSetPropertyDefined = false;
SourceLocation startLoc = PID->getLocStart();
InsertText(startLoc, "// ");
const char *startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '@') && "bogus @synthesize location");
const char *semiBuf = strchr(startBuf, ';');
assert((*semiBuf == ';') && "@synthesize: can't find ';'");
SourceLocation onePastSemiLoc =
startLoc.getLocWithOffset(semiBuf-startBuf+1);
if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
return; // FIXME: is this correct?
// Generate the 'getter' function.
ObjCPropertyDecl *PD = PID->getPropertyDecl();
ObjCIvarDecl *OID = PID->getPropertyIvarDecl();
if (!OID)
return;
unsigned Attributes = PD->getPropertyAttributes();
if (!PD->getGetterMethodDecl()->isDefined()) {
bool GenGetProperty = !(Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) &&
(Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
ObjCPropertyDecl::OBJC_PR_copy));
std::string Getr;
if (GenGetProperty && !objcGetPropertyDefined) {
objcGetPropertyDefined = true;
// FIXME. Is this attribute correct in all cases?
Getr = "\nextern \"C\" __declspec(dllimport) "
"id objc_getProperty(id, SEL, long, bool);\n";
}
RewriteObjCMethodDecl(OID->getContainingInterface(),
PD->getGetterMethodDecl(), Getr);
Getr += "{ ";
// Synthesize an explicit cast to gain access to the ivar.
// See objc-act.c:objc_synthesize_new_getter() for details.
if (GenGetProperty) {
// return objc_getProperty(self, _cmd, offsetof(ClassDecl, OID), 1)
Getr += "typedef ";
const FunctionType *FPRetType = nullptr;
RewriteTypeIntoString(PD->getGetterMethodDecl()->getReturnType(), Getr,
FPRetType);
Getr += " _TYPE";
if (FPRetType) {
Getr += ")"; // close the precedence "scope" for "*".
// Now, emit the argument types (if any).
if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FPRetType)){
Getr += "(";
for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
if (i) Getr += ", ";
std::string ParamStr =
FT->getParamType(i).getAsString(Context->getPrintingPolicy());
Getr += ParamStr;
}
if (FT->isVariadic()) {
if (FT->getNumParams())
Getr += ", ";
Getr += "...";
}
Getr += ")";
} else
Getr += "()";
}
Getr += ";\n";
Getr += "return (_TYPE)";
Getr += "objc_getProperty(self, _cmd, ";
RewriteIvarOffsetComputation(OID, Getr);
Getr += ", 1)";
}
else
Getr += "return " + getIvarAccessString(OID);
Getr += "; }";
InsertText(onePastSemiLoc, Getr);
}
if (PD->isReadOnly() || PD->getSetterMethodDecl()->isDefined())
return;
// Generate the 'setter' function.
std::string Setr;
bool GenSetProperty = Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
ObjCPropertyDecl::OBJC_PR_copy);
if (GenSetProperty && !objcSetPropertyDefined) {
objcSetPropertyDefined = true;
// FIXME. Is this attribute correct in all cases?
Setr = "\nextern \"C\" __declspec(dllimport) "
"void objc_setProperty (id, SEL, long, id, bool, bool);\n";
}
RewriteObjCMethodDecl(OID->getContainingInterface(),
PD->getSetterMethodDecl(), Setr);
Setr += "{ ";
// Synthesize an explicit cast to initialize the ivar.
// See objc-act.c:objc_synthesize_new_setter() for details.
if (GenSetProperty) {
Setr += "objc_setProperty (self, _cmd, ";
RewriteIvarOffsetComputation(OID, Setr);
Setr += ", (id)";
Setr += PD->getName();
Setr += ", ";
if (Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic)
Setr += "0, ";
else
Setr += "1, ";
if (Attributes & ObjCPropertyDecl::OBJC_PR_copy)
Setr += "1)";
else
Setr += "0)";
}
else {
Setr += getIvarAccessString(OID) + " = ";
Setr += PD->getName();
}
Setr += "; }";
InsertText(onePastSemiLoc, Setr);
}
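// Emits a guarded typedef so a forward-declared class can be referenced as a
// plain C struct. For "@class Foo;" this produces:
//   #ifndef _REWRITER_typedef_Foo
//   #define _REWRITER_typedef_Foo
//   typedef struct objc_object Foo;
//   #endif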
static void RewriteOneForwardClassDecl(ObjCInterfaceDecl *ForwardDecl,
std::string &typedefString) {
typedefString += "#ifndef _REWRITER_typedef_";
typedefString += ForwardDecl->getNameAsString();
typedefString += "\n";
typedefString += "#define _REWRITER_typedef_";
typedefString += ForwardDecl->getNameAsString();
typedefString += "\n";
typedefString += "typedef struct objc_object ";
typedefString += ForwardDecl->getNameAsString();
typedefString += ";\n#endif\n";
}
void RewriteObjC::RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl,
const std::string &typedefString) {
SourceLocation startLoc = ClassDecl->getLocStart();
const char *startBuf = SM->getCharacterData(startLoc);
const char *semiPtr = strchr(startBuf, ';');
// Replace the @class with typedefs corresponding to the classes.
ReplaceText(startLoc, semiPtr-startBuf+1, typedefString);
}
void RewriteObjC::RewriteForwardClassDecl(DeclGroupRef D) {
std::string typedefString;
for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
ObjCInterfaceDecl *ForwardDecl = cast<ObjCInterfaceDecl>(*I);
if (I == D.begin()) {
      // Translate to typedefs that forward-reference structs with the same name
// as the class. As a convenience, we include the original declaration
// as a comment.
typedefString += "// @class ";
typedefString += ForwardDecl->getNameAsString();
typedefString += ";\n";
}
RewriteOneForwardClassDecl(ForwardDecl, typedefString);
}
DeclGroupRef::iterator I = D.begin();
RewriteForwardClassEpilogue(cast<ObjCInterfaceDecl>(*I), typedefString);
}
void RewriteObjC::RewriteForwardClassDecl(const SmallVectorImpl<Decl *> &D) {
std::string typedefString;
for (unsigned i = 0; i < D.size(); i++) {
ObjCInterfaceDecl *ForwardDecl = cast<ObjCInterfaceDecl>(D[i]);
if (i == 0) {
typedefString += "// @class ";
typedefString += ForwardDecl->getNameAsString();
typedefString += ";\n";
}
RewriteOneForwardClassDecl(ForwardDecl, typedefString);
}
RewriteForwardClassEpilogue(cast<ObjCInterfaceDecl>(D[0]), typedefString);
}
void RewriteObjC::RewriteMethodDeclaration(ObjCMethodDecl *Method) {
  // When the method is a synthesized one, such as a getter/setter, there is
  // nothing to rewrite.
if (Method->isImplicit())
return;
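  // Declarations spanning multiple lines are bracketed with '#if 0' ...
  // '#endif'; single-line ones are simply commented out.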
SourceLocation LocStart = Method->getLocStart();
SourceLocation LocEnd = Method->getLocEnd();
if (SM->getExpansionLineNumber(LocEnd) >
SM->getExpansionLineNumber(LocStart)) {
InsertText(LocStart, "#if 0\n");
ReplaceText(LocEnd, 1, ";\n#endif\n");
} else {
InsertText(LocStart, "// ");
}
}
void RewriteObjC::RewriteProperty(ObjCPropertyDecl *prop) {
SourceLocation Loc = prop->getAtLoc();
ReplaceText(Loc, 0, "// ");
// FIXME: handle properties that are declared across multiple lines.
}
void RewriteObjC::RewriteCategoryDecl(ObjCCategoryDecl *CatDecl) {
SourceLocation LocStart = CatDecl->getLocStart();
// FIXME: handle category headers that are declared across multiple lines.
ReplaceText(LocStart, 0, "// ");
for (auto *I : CatDecl->properties())
RewriteProperty(I);
for (auto *I : CatDecl->instance_methods())
RewriteMethodDeclaration(I);
for (auto *I : CatDecl->class_methods())
RewriteMethodDeclaration(I);
// Lastly, comment out the @end.
ReplaceText(CatDecl->getAtEndRange().getBegin(),
strlen("@end"), "/* @end */");
}
void RewriteObjC::RewriteProtocolDecl(ObjCProtocolDecl *PDecl) {
SourceLocation LocStart = PDecl->getLocStart();
assert(PDecl->isThisDeclarationADefinition());
// FIXME: handle protocol headers that are declared across multiple lines.
ReplaceText(LocStart, 0, "// ");
for (auto *I : PDecl->instance_methods())
RewriteMethodDeclaration(I);
for (auto *I : PDecl->class_methods())
RewriteMethodDeclaration(I);
for (auto *I : PDecl->properties())
RewriteProperty(I);
// Lastly, comment out the @end.
SourceLocation LocEnd = PDecl->getAtEndRange().getBegin();
ReplaceText(LocEnd, strlen("@end"), "/* @end */");
// Must comment out @optional/@required
const char *startBuf = SM->getCharacterData(LocStart);
const char *endBuf = SM->getCharacterData(LocEnd);
for (const char *p = startBuf; p < endBuf; p++) {
if (*p == '@' && !strncmp(p+1, "optional", strlen("optional"))) {
SourceLocation OptionalLoc = LocStart.getLocWithOffset(p-startBuf);
ReplaceText(OptionalLoc, strlen("@optional"), "/* @optional */");
}
else if (*p == '@' && !strncmp(p+1, "required", strlen("required"))) {
SourceLocation OptionalLoc = LocStart.getLocWithOffset(p-startBuf);
ReplaceText(OptionalLoc, strlen("@required"), "/* @required */");
}
}
}
void RewriteObjC::RewriteForwardProtocolDecl(DeclGroupRef D) {
SourceLocation LocStart = (*D.begin())->getLocStart();
if (LocStart.isInvalid())
llvm_unreachable("Invalid SourceLocation");
  // FIXME: handle forward protocols that are declared across multiple lines.
ReplaceText(LocStart, 0, "// ");
}
void
RewriteObjC::RewriteForwardProtocolDecl(const SmallVectorImpl<Decl *> &DG) {
SourceLocation LocStart = DG[0]->getLocStart();
if (LocStart.isInvalid())
llvm_unreachable("Invalid SourceLocation");
  // FIXME: handle forward protocols that are declared across multiple lines.
ReplaceText(LocStart, 0, "// ");
}
void RewriteObjC::RewriteTypeIntoString(QualType T, std::string &ResultStr,
const FunctionType *&FPRetType) {
if (T->isObjCQualifiedIdType())
ResultStr += "id";
else if (T->isFunctionPointerType() ||
T->isBlockPointerType()) {
// needs special handling, since pointer-to-functions have special
    // syntax (where a declaration models use).
QualType retType = T;
QualType PointeeTy;
if (const PointerType* PT = retType->getAs<PointerType>())
PointeeTy = PT->getPointeeType();
else if (const BlockPointerType *BPT = retType->getAs<BlockPointerType>())
PointeeTy = BPT->getPointeeType();
if ((FPRetType = PointeeTy->getAs<FunctionType>())) {
ResultStr +=
FPRetType->getReturnType().getAsString(Context->getPrintingPolicy());
ResultStr += "(*";
}
} else
ResultStr += T.getAsString(Context->getPrintingPolicy());
}
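// Synthesizes a static C function for an Objective-C method. For example,
// "- (void)setX:(int)x" in class Foo becomes
// "static void _I_Foo_setX_(Foo *self, SEL _cmd, int x)"; class methods use
// the "_C_" prefix and take a Class-typed self.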
void RewriteObjC::RewriteObjCMethodDecl(const ObjCInterfaceDecl *IDecl,
ObjCMethodDecl *OMD,
std::string &ResultStr) {
//fprintf(stderr,"In RewriteObjCMethodDecl\n");
const FunctionType *FPRetType = nullptr;
ResultStr += "\nstatic ";
RewriteTypeIntoString(OMD->getReturnType(), ResultStr, FPRetType);
ResultStr += " ";
// Unique method name
std::string NameStr;
if (OMD->isInstanceMethod())
NameStr += "_I_";
else
NameStr += "_C_";
NameStr += IDecl->getNameAsString();
NameStr += "_";
if (ObjCCategoryImplDecl *CID =
dyn_cast<ObjCCategoryImplDecl>(OMD->getDeclContext())) {
NameStr += CID->getNameAsString();
NameStr += "_";
}
// Append selector names, replacing ':' with '_'
{
std::string selString = OMD->getSelector().getAsString();
int len = selString.size();
for (int i = 0; i < len; i++)
if (selString[i] == ':')
selString[i] = '_';
NameStr += selString;
}
// Remember this name for metadata emission
MethodInternalNames[OMD] = NameStr;
ResultStr += NameStr;
// Rewrite arguments
ResultStr += "(";
// invisible arguments
if (OMD->isInstanceMethod()) {
QualType selfTy = Context->getObjCInterfaceType(IDecl);
selfTy = Context->getPointerType(selfTy);
if (!LangOpts.MicrosoftExt) {
if (ObjCSynthesizedStructs.count(const_cast<ObjCInterfaceDecl*>(IDecl)))
ResultStr += "struct ";
}
// When rewriting for Microsoft, explicitly omit the structure name.
ResultStr += IDecl->getNameAsString();
ResultStr += " *";
}
else
ResultStr += Context->getObjCClassType().getAsString(
Context->getPrintingPolicy());
ResultStr += " self, ";
ResultStr += Context->getObjCSelType().getAsString(Context->getPrintingPolicy());
ResultStr += " _cmd";
// Method arguments.
for (const auto *PDecl : OMD->params()) {
ResultStr += ", ";
if (PDecl->getType()->isObjCQualifiedIdType()) {
ResultStr += "id ";
ResultStr += PDecl->getNameAsString();
} else {
std::string Name = PDecl->getNameAsString();
QualType QT = PDecl->getType();
// Make sure we convert "t (^)(...)" to "t (*)(...)".
(void)convertBlockPointerToFunctionPointer(QT);
QT.getAsStringInternal(Name, Context->getPrintingPolicy());
ResultStr += Name;
}
}
if (OMD->isVariadic())
ResultStr += ", ...";
ResultStr += ") ";
if (FPRetType) {
ResultStr += ")"; // close the precedence "scope" for "*".
// Now, emit the argument types (if any).
if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FPRetType)) {
ResultStr += "(";
for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
if (i) ResultStr += ", ";
std::string ParamStr =
FT->getParamType(i).getAsString(Context->getPrintingPolicy());
ResultStr += ParamStr;
}
if (FT->isVariadic()) {
if (FT->getNumParams())
ResultStr += ", ";
ResultStr += "...";
}
ResultStr += ")";
} else {
ResultStr += "()";
}
}
}
void RewriteObjC::RewriteImplementationDecl(Decl *OID) {
ObjCImplementationDecl *IMD = dyn_cast<ObjCImplementationDecl>(OID);
ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(OID);
InsertText(IMD ? IMD->getLocStart() : CID->getLocStart(), "// ");
for (auto *OMD : IMD ? IMD->instance_methods() : CID->instance_methods()) {
std::string ResultStr;
RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr);
SourceLocation LocStart = OMD->getLocStart();
SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart();
const char *startBuf = SM->getCharacterData(LocStart);
const char *endBuf = SM->getCharacterData(LocEnd);
ReplaceText(LocStart, endBuf-startBuf, ResultStr);
}
for (auto *OMD : IMD ? IMD->class_methods() : CID->class_methods()) {
std::string ResultStr;
RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr);
SourceLocation LocStart = OMD->getLocStart();
SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart();
const char *startBuf = SM->getCharacterData(LocStart);
const char *endBuf = SM->getCharacterData(LocEnd);
ReplaceText(LocStart, endBuf-startBuf, ResultStr);
}
for (auto *I : IMD ? IMD->property_impls() : CID->property_impls())
RewritePropertyImplDecl(I, IMD, CID);
InsertText(IMD ? IMD->getLocEnd() : CID->getLocEnd(), "// ");
}
void RewriteObjC::RewriteInterfaceDecl(ObjCInterfaceDecl *ClassDecl) {
std::string ResultStr;
if (!ObjCForwardDecls.count(ClassDecl->getCanonicalDecl())) {
// we haven't seen a forward decl - generate a typedef.
ResultStr = "#ifndef _REWRITER_typedef_";
ResultStr += ClassDecl->getNameAsString();
ResultStr += "\n";
ResultStr += "#define _REWRITER_typedef_";
ResultStr += ClassDecl->getNameAsString();
ResultStr += "\n";
ResultStr += "typedef struct objc_object ";
ResultStr += ClassDecl->getNameAsString();
ResultStr += ";\n#endif\n";
// Mark this typedef as having been generated.
ObjCForwardDecls.insert(ClassDecl->getCanonicalDecl());
}
RewriteObjCInternalStruct(ClassDecl, ResultStr);
for (auto *I : ClassDecl->properties())
RewriteProperty(I);
for (auto *I : ClassDecl->instance_methods())
RewriteMethodDeclaration(I);
for (auto *I : ClassDecl->class_methods())
RewriteMethodDeclaration(I);
// Lastly, comment out the @end.
ReplaceText(ClassDecl->getAtEndRange().getBegin(), strlen("@end"),
"/* @end */");
}
Stmt *RewriteObjC::RewritePropertyOrImplicitSetter(PseudoObjectExpr *PseudoOp) {
SourceRange OldRange = PseudoOp->getSourceRange();
// We just magically know some things about the structure of this
// expression.
ObjCMessageExpr *OldMsg =
cast<ObjCMessageExpr>(PseudoOp->getSemanticExpr(
PseudoOp->getNumSemanticExprs() - 1));
// Because the rewriter doesn't allow us to rewrite rewritten code,
// we need to suppress rewriting the sub-statements.
Expr *Base, *RHS;
{
DisableReplaceStmtScope S(*this);
// Rebuild the base expression if we have one.
Base = nullptr;
if (OldMsg->getReceiverKind() == ObjCMessageExpr::Instance) {
Base = OldMsg->getInstanceReceiver();
Base = cast<OpaqueValueExpr>(Base)->getSourceExpr();
Base = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(Base));
}
// Rebuild the RHS.
RHS = cast<BinaryOperator>(PseudoOp->getSyntacticForm())->getRHS();
RHS = cast<OpaqueValueExpr>(RHS)->getSourceExpr();
RHS = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(RHS));
}
// TODO: avoid this copy.
SmallVector<SourceLocation, 1> SelLocs;
OldMsg->getSelectorLocs(SelLocs);
ObjCMessageExpr *NewMsg = nullptr;
switch (OldMsg->getReceiverKind()) {
case ObjCMessageExpr::Class:
NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
OldMsg->getValueKind(),
OldMsg->getLeftLoc(),
OldMsg->getClassReceiverTypeInfo(),
OldMsg->getSelector(),
SelLocs,
OldMsg->getMethodDecl(),
RHS,
OldMsg->getRightLoc(),
OldMsg->isImplicit());
break;
case ObjCMessageExpr::Instance:
NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
OldMsg->getValueKind(),
OldMsg->getLeftLoc(),
Base,
OldMsg->getSelector(),
SelLocs,
OldMsg->getMethodDecl(),
RHS,
OldMsg->getRightLoc(),
OldMsg->isImplicit());
break;
case ObjCMessageExpr::SuperClass:
case ObjCMessageExpr::SuperInstance:
NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
OldMsg->getValueKind(),
OldMsg->getLeftLoc(),
OldMsg->getSuperLoc(),
OldMsg->getReceiverKind() == ObjCMessageExpr::SuperInstance,
OldMsg->getSuperType(),
OldMsg->getSelector(),
SelLocs,
OldMsg->getMethodDecl(),
RHS,
OldMsg->getRightLoc(),
OldMsg->isImplicit());
break;
}
Stmt *Replacement = SynthMessageExpr(NewMsg);
ReplaceStmtWithRange(PseudoOp, Replacement, OldRange);
return Replacement;
}
Stmt *RewriteObjC::RewritePropertyOrImplicitGetter(PseudoObjectExpr *PseudoOp) {
SourceRange OldRange = PseudoOp->getSourceRange();
// We just magically know some things about the structure of this
// expression.
ObjCMessageExpr *OldMsg =
cast<ObjCMessageExpr>(PseudoOp->getResultExpr()->IgnoreImplicit());
// Because the rewriter doesn't allow us to rewrite rewritten code,
// we need to suppress rewriting the sub-statements.
Expr *Base = nullptr;
{
DisableReplaceStmtScope S(*this);
// Rebuild the base expression if we have one.
if (OldMsg->getReceiverKind() == ObjCMessageExpr::Instance) {
Base = OldMsg->getInstanceReceiver();
Base = cast<OpaqueValueExpr>(Base)->getSourceExpr();
Base = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(Base));
}
}
// Intentionally empty.
SmallVector<SourceLocation, 1> SelLocs;
SmallVector<Expr*, 1> Args;
ObjCMessageExpr *NewMsg = nullptr;
switch (OldMsg->getReceiverKind()) {
case ObjCMessageExpr::Class:
NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
OldMsg->getValueKind(),
OldMsg->getLeftLoc(),
OldMsg->getClassReceiverTypeInfo(),
OldMsg->getSelector(),
SelLocs,
OldMsg->getMethodDecl(),
Args,
OldMsg->getRightLoc(),
OldMsg->isImplicit());
break;
case ObjCMessageExpr::Instance:
NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
OldMsg->getValueKind(),
OldMsg->getLeftLoc(),
Base,
OldMsg->getSelector(),
SelLocs,
OldMsg->getMethodDecl(),
Args,
OldMsg->getRightLoc(),
OldMsg->isImplicit());
break;
case ObjCMessageExpr::SuperClass:
case ObjCMessageExpr::SuperInstance:
NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
OldMsg->getValueKind(),
OldMsg->getLeftLoc(),
OldMsg->getSuperLoc(),
OldMsg->getReceiverKind() == ObjCMessageExpr::SuperInstance,
OldMsg->getSuperType(),
OldMsg->getSelector(),
SelLocs,
OldMsg->getMethodDecl(),
Args,
OldMsg->getRightLoc(),
OldMsg->isImplicit());
break;
}
Stmt *Replacement = SynthMessageExpr(NewMsg);
ReplaceStmtWithRange(PseudoOp, Replacement, OldRange);
return Replacement;
}
/// SynthCountByEnumWithState - To print:
/// ((unsigned int (*)
/// (id, SEL, struct __objcFastEnumerationState *, id *, unsigned int))
/// (void *)objc_msgSend)((id)l_collection,
/// sel_registerName(
/// "countByEnumeratingWithState:objects:count:"),
/// &enumState,
/// (id *)__rw_items, (unsigned int)16)
///
void RewriteObjC::SynthCountByEnumWithState(std::string &buf) {
buf += "((unsigned int (*) (id, SEL, struct __objcFastEnumerationState *, "
"id *, unsigned int))(void *)objc_msgSend)";
buf += "\n\t\t";
buf += "((id)l_collection,\n\t\t";
buf += "sel_registerName(\"countByEnumeratingWithState:objects:count:\"),";
buf += "\n\t\t";
buf += "&enumState, "
"(id *)__rw_items, (unsigned int)16)";
}
/// RewriteBreakStmt - Rewrite for a break-stmt inside an ObjC2's foreach
/// statement to exit to its outer synthesized loop.
///
Stmt *RewriteObjC::RewriteBreakStmt(BreakStmt *S) {
if (Stmts.empty() || !isa<ObjCForCollectionStmt>(Stmts.back()))
return S;
// replace break with goto __break_label
std::string buf;
SourceLocation startLoc = S->getLocStart();
buf = "goto __break_label_";
buf += utostr(ObjCBcLabelNo.back());
ReplaceText(startLoc, strlen("break"), buf);
return nullptr;
}
/// RewriteContinueStmt - Rewrite for a continue-stmt inside an ObjC2's foreach
/// statement to continue with its inner synthesized loop.
///
Stmt *RewriteObjC::RewriteContinueStmt(ContinueStmt *S) {
if (Stmts.empty() || !isa<ObjCForCollectionStmt>(Stmts.back()))
return S;
// replace continue with goto __continue_label
std::string buf;
SourceLocation startLoc = S->getLocStart();
buf = "goto __continue_label_";
buf += utostr(ObjCBcLabelNo.back());
ReplaceText(startLoc, strlen("continue"), buf);
return nullptr;
}
/// RewriteObjCForCollectionStmt - Rewriter for ObjC2's foreach statement.
/// It rewrites:
/// for ( type elem in collection) { stmts; }
/// Into:
/// {
/// type elem;
/// struct __objcFastEnumerationState enumState = { 0 };
/// id __rw_items[16];
/// id l_collection = (id)collection;
/// unsigned long limit = [l_collection countByEnumeratingWithState:&enumState
/// objects:__rw_items count:16];
/// if (limit) {
/// unsigned long startMutations = *enumState.mutationsPtr;
/// do {
/// unsigned long counter = 0;
/// do {
/// if (startMutations != *enumState.mutationsPtr)
/// objc_enumerationMutation(l_collection);
/// elem = (type)enumState.itemsPtr[counter++];
/// stmts;
/// __continue_label: ;
/// } while (counter < limit);
/// } while (limit = [l_collection countByEnumeratingWithState:&enumState
/// objects:__rw_items count:16]);
/// elem = nil;
/// __break_label: ;
/// }
/// else
/// elem = nil;
/// }
///
Stmt *RewriteObjC::RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
SourceLocation OrigEnd) {
assert(!Stmts.empty() && "ObjCForCollectionStmt - Statement stack empty");
assert(isa<ObjCForCollectionStmt>(Stmts.back()) &&
"ObjCForCollectionStmt Statement stack mismatch");
assert(!ObjCBcLabelNo.empty() &&
"ObjCForCollectionStmt - Label No stack empty");
SourceLocation startLoc = S->getLocStart();
const char *startBuf = SM->getCharacterData(startLoc);
StringRef elementName;
std::string elementTypeAsString;
std::string buf;
buf = "\n{\n\t";
if (DeclStmt *DS = dyn_cast<DeclStmt>(S->getElement())) {
// type elem;
NamedDecl* D = cast<NamedDecl>(DS->getSingleDecl());
QualType ElementType = cast<ValueDecl>(D)->getType();
if (ElementType->isObjCQualifiedIdType() ||
ElementType->isObjCQualifiedInterfaceType())
// Simply use 'id' for all qualified types.
elementTypeAsString = "id";
else
elementTypeAsString = ElementType.getAsString(Context->getPrintingPolicy());
buf += elementTypeAsString;
buf += " ";
elementName = D->getName();
buf += elementName;
buf += ";\n\t";
}
else {
DeclRefExpr *DR = cast<DeclRefExpr>(S->getElement());
elementName = DR->getDecl()->getName();
ValueDecl *VD = cast<ValueDecl>(DR->getDecl());
if (VD->getType()->isObjCQualifiedIdType() ||
VD->getType()->isObjCQualifiedInterfaceType())
// Simply use 'id' for all qualified types.
elementTypeAsString = "id";
else
elementTypeAsString = VD->getType().getAsString(Context->getPrintingPolicy());
}
// struct __objcFastEnumerationState enumState = { 0 };
buf += "struct __objcFastEnumerationState enumState = { 0 };\n\t";
// id __rw_items[16];
buf += "id __rw_items[16];\n\t";
// id l_collection = (id)
buf += "id l_collection = (id)";
// Find start location of 'collection' the hard way!
const char *startCollectionBuf = startBuf;
startCollectionBuf += 3; // skip 'for'
startCollectionBuf = strchr(startCollectionBuf, '(');
startCollectionBuf++; // skip '('
// find 'in' and skip it.
while (*startCollectionBuf != ' ' ||
*(startCollectionBuf+1) != 'i' || *(startCollectionBuf+2) != 'n' ||
(*(startCollectionBuf+3) != ' ' &&
*(startCollectionBuf+3) != '[' && *(startCollectionBuf+3) != '('))
startCollectionBuf++;
startCollectionBuf += 3;
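  // startCollectionBuf now points just past 'in'; everything up to this point
  // ("for ( type elem in") is replaced by the prologue built above.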
// Replace: "for (type element in" with string constructed thus far.
ReplaceText(startLoc, startCollectionBuf - startBuf, buf);
// Replace ')' in for '(' type elem in collection ')' with ';'
SourceLocation rightParenLoc = S->getRParenLoc();
const char *rparenBuf = SM->getCharacterData(rightParenLoc);
SourceLocation lparenLoc = startLoc.getLocWithOffset(rparenBuf-startBuf);
buf = ";\n\t";
// unsigned long limit = [l_collection countByEnumeratingWithState:&enumState
// objects:__rw_items count:16];
// which is synthesized into:
// unsigned int limit =
// ((unsigned int (*)
// (id, SEL, struct __objcFastEnumerationState *, id *, unsigned int))
// (void *)objc_msgSend)((id)l_collection,
// sel_registerName(
// "countByEnumeratingWithState:objects:count:"),
// (struct __objcFastEnumerationState *)&state,
// (id *)__rw_items, (unsigned int)16);
buf += "unsigned long limit =\n\t\t";
SynthCountByEnumWithState(buf);
buf += ";\n\t";
/// if (limit) {
/// unsigned long startMutations = *enumState.mutationsPtr;
/// do {
/// unsigned long counter = 0;
/// do {
/// if (startMutations != *enumState.mutationsPtr)
/// objc_enumerationMutation(l_collection);
/// elem = (type)enumState.itemsPtr[counter++];
buf += "if (limit) {\n\t";
buf += "unsigned long startMutations = *enumState.mutationsPtr;\n\t";
buf += "do {\n\t\t";
buf += "unsigned long counter = 0;\n\t\t";
buf += "do {\n\t\t\t";
buf += "if (startMutations != *enumState.mutationsPtr)\n\t\t\t\t";
buf += "objc_enumerationMutation(l_collection);\n\t\t\t";
buf += elementName;
buf += " = (";
buf += elementTypeAsString;
buf += ")enumState.itemsPtr[counter++];";
// Replace ')' in for '(' type elem in collection ')' with all of these.
ReplaceText(lparenLoc, 1, buf);
/// __continue_label: ;
/// } while (counter < limit);
/// } while (limit = [l_collection countByEnumeratingWithState:&enumState
/// objects:__rw_items count:16]);
/// elem = nil;
/// __break_label: ;
/// }
/// else
/// elem = nil;
/// }
///
buf = ";\n\t";
buf += "__continue_label_";
buf += utostr(ObjCBcLabelNo.back());
buf += ": ;";
buf += "\n\t\t";
buf += "} while (counter < limit);\n\t";
buf += "} while (limit = ";
SynthCountByEnumWithState(buf);
buf += ");\n\t";
buf += elementName;
buf += " = ((";
buf += elementTypeAsString;
buf += ")0);\n\t";
buf += "__break_label_";
buf += utostr(ObjCBcLabelNo.back());
buf += ": ;\n\t";
buf += "}\n\t";
buf += "else\n\t\t";
buf += elementName;
buf += " = ((";
buf += elementTypeAsString;
buf += ")0);\n\t";
buf += "}\n";
// Insert all these *after* the statement body.
// FIXME: If this should support Obj-C++, support CXXTryStmt
if (isa<CompoundStmt>(S->getBody())) {
SourceLocation endBodyLoc = OrigEnd.getLocWithOffset(1);
InsertText(endBodyLoc, buf);
} else {
/* Need to treat single statements specially. For example:
*
* for (A *a in b) if (stuff()) break;
* for (A *a in b) xxxyy;
*
     * The following code simply scans ahead to the semicolon to find the actual end.
*/
const char *stmtBuf = SM->getCharacterData(OrigEnd);
const char *semiBuf = strchr(stmtBuf, ';');
assert(semiBuf && "Can't find ';'");
SourceLocation endBodyLoc = OrigEnd.getLocWithOffset(semiBuf-stmtBuf+1);
InsertText(endBodyLoc, buf);
}
Stmts.pop_back();
ObjCBcLabelNo.pop_back();
return nullptr;
}
/// RewriteObjCSynchronizedStmt -
/// This routine rewrites @synchronized(expr) stmt;
/// into:
/// objc_sync_enter(expr);
/// @try stmt @finally { objc_sync_exit(expr); }
///
Stmt *RewriteObjC::RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
// Get the start location and compute the semi location.
SourceLocation startLoc = S->getLocStart();
const char *startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '@') && "bogus @synchronized location");
std::string buf;
buf = "objc_sync_enter((id)";
const char *lparenBuf = startBuf;
while (*lparenBuf != '(') lparenBuf++;
ReplaceText(startLoc, lparenBuf-startBuf+1, buf);
  // We can't use S->getSynchExpr()->getLocEnd() to find the end location, since
  // the sync expression is typically a message expression that's already
  // been rewritten! (which implies the SourceLocations are invalid).
SourceLocation endLoc = S->getSynchBody()->getLocStart();
const char *endBuf = SM->getCharacterData(endLoc);
while (*endBuf != ')') endBuf--;
SourceLocation rparenLoc = startLoc.getLocWithOffset(endBuf-startBuf);
buf = ");\n";
// declare a new scope with two variables, _stack and _rethrow.
buf += "/* @try scope begin */ \n{ struct _objc_exception_data {\n";
buf += "int buf[18/*32-bit i386*/];\n";
buf += "char *pointers[4];} _stack;\n";
buf += "id volatile _rethrow = 0;\n";
buf += "objc_exception_try_enter(&_stack);\n";
buf += "if (!_setjmp(_stack.buf)) /* @try block continue */\n";
ReplaceText(rparenLoc, 1, buf);
startLoc = S->getSynchBody()->getLocEnd();
startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '}') && "bogus @synchronized block");
SourceLocation lastCurlyLoc = startLoc;
buf = "}\nelse {\n";
buf += " _rethrow = objc_exception_extract(&_stack);\n";
buf += "}\n";
buf += "{ /* implicit finally clause */\n";
buf += " if (!_rethrow) objc_exception_try_exit(&_stack);\n";
std::string syncBuf;
syncBuf += " objc_sync_exit(";
Expr *syncExpr = S->getSynchExpr();
CastKind CK = syncExpr->getType()->isObjCObjectPointerType()
? CK_BitCast :
syncExpr->getType()->isBlockPointerType()
? CK_BlockPointerToObjCPointerCast
: CK_CPointerToObjCPointerCast;
syncExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
CK, syncExpr);
std::string syncExprBufS;
llvm::raw_string_ostream syncExprBuf(syncExprBufS);
assert(syncExpr != nullptr && "Expected non-null Expr");
syncExpr->printPretty(syncExprBuf, nullptr, PrintingPolicy(LangOpts));
syncBuf += syncExprBuf.str();
syncBuf += ");";
buf += syncBuf;
buf += "\n if (_rethrow) objc_exception_throw(_rethrow);\n";
buf += "}\n";
buf += "}";
ReplaceText(lastCurlyLoc, 1, buf);
bool hasReturns = false;
HasReturnStmts(S->getSynchBody(), hasReturns);
if (hasReturns)
RewriteSyncReturnStmts(S->getSynchBody(), syncBuf);
return nullptr;
}
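// WarnAboutReturnGotoStmts - Recursively walk S and diagnose every return
// and goto statement; jumping out of a rewritten @try/@finally would bypass
// the synthesized exception-stack cleanup.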
void RewriteObjC::WarnAboutReturnGotoStmts(Stmt *S)
{
// Perform a bottom up traversal of all children.
for (Stmt *SubStmt : S->children())
if (SubStmt)
WarnAboutReturnGotoStmts(SubStmt);
if (isa<ReturnStmt>(S) || isa<GotoStmt>(S)) {
Diags.Report(Context->getFullLoc(S->getLocStart()),
TryFinallyContainsReturnDiag);
}
return;
}
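// HasReturnStmts - Recursively walk S and set hasReturns if any return
// statement is found.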
void RewriteObjC::HasReturnStmts(Stmt *S, bool &hasReturns)
{
// Perform a bottom up traversal of all children.
for (Stmt *SubStmt : S->children())
if (SubStmt)
HasReturnStmts(SubStmt, hasReturns);
if (isa<ReturnStmt>(S))
hasReturns = true;
return;
}
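// RewriteTryReturnStmts - Rewrite each 'return expr;' inside a @try body as
// '{ objc_exception_try_exit(&_stack); return expr;}' so the exception stack
// is popped before control leaves the block.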
void RewriteObjC::RewriteTryReturnStmts(Stmt *S) {
// Perform a bottom up traversal of all children.
for (Stmt *SubStmt : S->children())
if (SubStmt) {
RewriteTryReturnStmts(SubStmt);
}
if (isa<ReturnStmt>(S)) {
SourceLocation startLoc = S->getLocStart();
const char *startBuf = SM->getCharacterData(startLoc);
const char *semiBuf = strchr(startBuf, ';');
assert((*semiBuf == ';') && "RewriteTryReturnStmts: can't find ';'");
SourceLocation onePastSemiLoc = startLoc.getLocWithOffset(semiBuf-startBuf+1);
std::string buf;
buf = "{ objc_exception_try_exit(&_stack); return";
ReplaceText(startLoc, 6, buf);
InsertText(onePastSemiLoc, "}");
}
return;
}
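// RewriteSyncReturnStmts - Like RewriteTryReturnStmts, but for @synchronized
// bodies: the objc_sync_exit() call (passed in via syncExitBuf) is emitted
// before the return as well.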
void RewriteObjC::RewriteSyncReturnStmts(Stmt *S, std::string syncExitBuf) {
// Perform a bottom up traversal of all children.
for (Stmt *SubStmt : S->children())
if (SubStmt) {
RewriteSyncReturnStmts(SubStmt, syncExitBuf);
}
if (isa<ReturnStmt>(S)) {
SourceLocation startLoc = S->getLocStart();
const char *startBuf = SM->getCharacterData(startLoc);
const char *semiBuf = strchr(startBuf, ';');
assert((*semiBuf == ';') && "RewriteSyncReturnStmts: can't find ';'");
SourceLocation onePastSemiLoc = startLoc.getLocWithOffset(semiBuf-startBuf+1);
std::string buf;
buf = "{ objc_exception_try_exit(&_stack);";
buf += syncExitBuf;
buf += " return";
ReplaceText(startLoc, 6, buf);
InsertText(onePastSemiLoc, "}");
}
return;
}
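/// RewriteObjCTryStmt - Rewrites @try/@catch/@finally into setjmp-based C
/// code. A rough sketch of the output (the actual rewrite also re-enters the
/// exception stack around the @catch chain and matches @catch parameters via
/// objc_exception_match):
/// { struct _objc_exception_data _stack; id volatile _rethrow = 0;
///   objc_exception_try_enter(&_stack);
///   if (!_setjmp(_stack.buf)) { /* @try body */ }
///   else { id _caught = objc_exception_extract(&_stack); /* @catch chain */ }
///   { /* finally (explicit or implicit) */
///     if (!_rethrow) objc_exception_try_exit(&_stack);
///     if (_rethrow) objc_exception_throw(_rethrow);
///   }
/// }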
Stmt *RewriteObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
// Get the start location and compute the semi location.
SourceLocation startLoc = S->getLocStart();
const char *startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '@') && "bogus @try location");
std::string buf;
// declare a new scope with two variables, _stack and _rethrow.
buf = "/* @try scope begin */ { struct _objc_exception_data {\n";
buf += "int buf[18/*32-bit i386*/];\n";
buf += "char *pointers[4];} _stack;\n";
buf += "id volatile _rethrow = 0;\n";
buf += "objc_exception_try_enter(&_stack);\n";
buf += "if (!_setjmp(_stack.buf)) /* @try block continue */\n";
ReplaceText(startLoc, 4, buf);
startLoc = S->getTryBody()->getLocEnd();
startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '}') && "bogus @try block");
SourceLocation lastCurlyLoc = startLoc;
if (S->getNumCatchStmts()) {
startLoc = startLoc.getLocWithOffset(1);
buf = " /* @catch begin */ else {\n";
buf += " id _caught = objc_exception_extract(&_stack);\n";
buf += " objc_exception_try_enter (&_stack);\n";
buf += " if (_setjmp(_stack.buf))\n";
buf += " _rethrow = objc_exception_extract(&_stack);\n";
buf += " else { /* @catch continue */";
InsertText(startLoc, buf);
} else { /* no catch list */
buf = "}\nelse {\n";
buf += " _rethrow = objc_exception_extract(&_stack);\n";
buf += "}";
ReplaceText(lastCurlyLoc, 1, buf);
}
Stmt *lastCatchBody = nullptr;
for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I) {
ObjCAtCatchStmt *Catch = S->getCatchStmt(I);
VarDecl *catchDecl = Catch->getCatchParamDecl();
if (I == 0)
buf = "if ("; // we are generating code for the first catch clause
else
buf = "else if (";
startLoc = Catch->getLocStart();
startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '@') && "bogus @catch location");
const char *lParenLoc = strchr(startBuf, '(');
if (Catch->hasEllipsis()) {
// Now rewrite the body...
lastCatchBody = Catch->getCatchBody();
SourceLocation bodyLoc = lastCatchBody->getLocStart();
const char *bodyBuf = SM->getCharacterData(bodyLoc);
assert(*SM->getCharacterData(Catch->getRParenLoc()) == ')' &&
"bogus @catch paren location");
assert((*bodyBuf == '{') && "bogus @catch body location");
buf += "1) { id _tmp = _caught;";
Rewrite.ReplaceText(startLoc, bodyBuf-startBuf+1, buf);
} else if (catchDecl) {
QualType t = catchDecl->getType();
if (t == Context->getObjCIdType()) {
buf += "1) { ";
ReplaceText(startLoc, lParenLoc-startBuf+1, buf);
} else if (const ObjCObjectPointerType *Ptr =
t->getAs<ObjCObjectPointerType>()) {
// Should be a pointer to a class.
ObjCInterfaceDecl *IDecl = Ptr->getObjectType()->getInterface();
if (IDecl) {
buf += "objc_exception_match((struct objc_class *)objc_getClass(\"";
buf += IDecl->getNameAsString();
buf += "\"), (struct objc_object *)_caught)) { ";
ReplaceText(startLoc, lParenLoc-startBuf+1, buf);
}
}
// Now rewrite the body...
lastCatchBody = Catch->getCatchBody();
SourceLocation rParenLoc = Catch->getRParenLoc();
SourceLocation bodyLoc = lastCatchBody->getLocStart();
const char *bodyBuf = SM->getCharacterData(bodyLoc);
const char *rParenBuf = SM->getCharacterData(rParenLoc);
assert((*rParenBuf == ')') && "bogus @catch paren location");
assert((*bodyBuf == '{') && "bogus @catch body location");
// Here we replace ") {" with "= _caught;" (which initializes and
// declares the @catch parameter).
ReplaceText(rParenLoc, bodyBuf-rParenBuf+1, " = _caught;");
} else {
llvm_unreachable("@catch rewrite bug");
}
}
// Complete the catch list...
if (lastCatchBody) {
SourceLocation bodyLoc = lastCatchBody->getLocEnd();
assert(*SM->getCharacterData(bodyLoc) == '}' &&
"bogus @catch body location");
// Insert the last (implicit) else clause *before* the right curly brace.
bodyLoc = bodyLoc.getLocWithOffset(-1);
buf = "} /* last catch end */\n";
buf += "else {\n";
buf += " _rethrow = _caught;\n";
buf += " objc_exception_try_exit(&_stack);\n";
buf += "} } /* @catch end */\n";
if (!S->getFinallyStmt())
buf += "}\n";
InsertText(bodyLoc, buf);
// Set lastCurlyLoc
lastCurlyLoc = lastCatchBody->getLocEnd();
}
if (ObjCAtFinallyStmt *finalStmt = S->getFinallyStmt()) {
startLoc = finalStmt->getLocStart();
startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '@') && "bogus @finally start");
ReplaceText(startLoc, 8, "/* @finally */");
Stmt *body = finalStmt->getFinallyBody();
SourceLocation startLoc = body->getLocStart();
SourceLocation endLoc = body->getLocEnd();
assert(*SM->getCharacterData(startLoc) == '{' &&
"bogus @finally body location");
assert(*SM->getCharacterData(endLoc) == '}' &&
"bogus @finally body location");
startLoc = startLoc.getLocWithOffset(1);
InsertText(startLoc, " if (!_rethrow) objc_exception_try_exit(&_stack);\n");
endLoc = endLoc.getLocWithOffset(-1);
InsertText(endLoc, " if (_rethrow) objc_exception_throw(_rethrow);\n");
// Set lastCurlyLoc
lastCurlyLoc = body->getLocEnd();
    // Now check for any return/continue/goto statements within the @try.
WarnAboutReturnGotoStmts(S->getTryBody());
} else { /* no finally clause - make sure we synthesize an implicit one */
buf = "{ /* implicit finally clause */\n";
buf += " if (!_rethrow) objc_exception_try_exit(&_stack);\n";
buf += " if (_rethrow) objc_exception_throw(_rethrow);\n";
buf += "}";
ReplaceText(lastCurlyLoc, 1, buf);
    // Now check for any return/continue/goto statements within the @try.
    // The implicit finally clause won't be called if the @try contains any
    // jump statements.
bool hasReturns = false;
HasReturnStmts(S->getTryBody(), hasReturns);
if (hasReturns)
RewriteTryReturnStmts(S->getTryBody());
}
// Now emit the final closing curly brace...
lastCurlyLoc = lastCurlyLoc.getLocWithOffset(1);
InsertText(lastCurlyLoc, " } /* @try scope end */\n");
return nullptr;
}
// This can't be done with ReplaceStmt(S, ThrowExpr), since
// the throw expression is typically a message expression that's already
// been rewritten! (which implies the SourceLocations are invalid).
Stmt *RewriteObjC::RewriteObjCThrowStmt(ObjCAtThrowStmt *S) {
// Get the start location and compute the semi location.
SourceLocation startLoc = S->getLocStart();
const char *startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '@') && "bogus @throw location");
std::string buf;
/* void objc_exception_throw(id) __attribute__((noreturn)); */
if (S->getThrowExpr())
buf = "objc_exception_throw(";
else // add an implicit argument
buf = "objc_exception_throw(_caught";
// handle "@ throw" correctly.
const char *wBuf = strchr(startBuf, 'w');
assert((*wBuf == 'w') && "@throw: can't find 'w'");
ReplaceText(startLoc, wBuf-startBuf+1, buf);
const char *semiBuf = strchr(startBuf, ';');
assert((*semiBuf == ';') && "@throw: can't find ';'");
SourceLocation semiLoc = startLoc.getLocWithOffset(semiBuf-startBuf);
ReplaceText(semiLoc, 1, ");");
return nullptr;
}
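// RewriteAtEncode - Rewrite @encode(type) into its Objective-C type-encoding
// string literal; e.g. @encode(int) becomes "i".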
Stmt *RewriteObjC::RewriteAtEncode(ObjCEncodeExpr *Exp) {
// Create a new string expression.
std::string StrEncoding;
Context->getObjCEncodingForType(Exp->getEncodedType(), StrEncoding);
Expr *Replacement = getStringLiteral(StrEncoding);
ReplaceStmt(Exp, Replacement);
// Replace this subexpr in the parent.
// delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
return Replacement;
}
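// RewriteAtSelector - Rewrite @selector(sel) into sel_registerName("sel").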
Stmt *RewriteObjC::RewriteAtSelector(ObjCSelectorExpr *Exp) {
if (!SelGetUidFunctionDecl)
SynthSelGetUidFunctionDecl();
assert(SelGetUidFunctionDecl && "Can't find sel_registerName() decl");
// Create a call to sel_registerName("selName").
SmallVector<Expr*, 8> SelExprs;
SelExprs.push_back(getStringLiteral(Exp->getSelector().getAsString()));
CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
&SelExprs[0], SelExprs.size());
ReplaceStmt(Exp, SelExp);
// delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
return SelExp;
}
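// SynthesizeCallToFunctionDecl - Build a CallExpr that invokes FD with the
// given arguments, inserting the implicit function-to-pointer decay on the
// callee.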
CallExpr *RewriteObjC::SynthesizeCallToFunctionDecl(
FunctionDecl *FD, Expr **args, unsigned nargs, SourceLocation StartLoc,
SourceLocation EndLoc) {
// Get the type, we will need to reference it in a couple spots.
QualType msgSendType = FD->getType();
// Create a reference to the objc_msgSend() declaration.
DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, msgSendType,
VK_LValue, SourceLocation());
// Now, we cast the reference to a pointer to the objc_msgSend type.
QualType pToFunc = Context->getPointerType(msgSendType);
ImplicitCastExpr *ICE =
ImplicitCastExpr::Create(*Context, pToFunc, CK_FunctionToPointerDecay,
DRE, nullptr, VK_RValue);
const FunctionType *FT = msgSendType->getAs<FunctionType>();
CallExpr *Exp =
new (Context) CallExpr(*Context, ICE, llvm::makeArrayRef(args, nargs),
FT->getCallResultType(*Context),
VK_RValue, EndLoc);
return Exp;
}
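// scanForProtocolRefs - Scan [startBuf, endBuf) for a protocol qualifier
// list '<...>'. On success, point startRef/endRef at the '<' and '>'
// characters and return true.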
static bool scanForProtocolRefs(const char *startBuf, const char *endBuf,
const char *&startRef, const char *&endRef) {
while (startBuf < endBuf) {
if (*startBuf == '<')
startRef = startBuf; // mark the start.
if (*startBuf == '>') {
if (startRef && *startRef == '<') {
endRef = startBuf; // mark the end.
return true;
}
return false;
}
startBuf++;
}
return false;
}
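// scanToNextArgument - Advance argRef to the ',' or ')' that terminates the
// current argument, skipping over any '<...>' protocol lists so that, e.g.,
// the comma inside "id<P, Q>" is not mistaken for an argument separator.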
static void scanToNextArgument(const char *&argRef) {
int angle = 0;
while (*argRef != ')' && (*argRef != ',' || angle > 0)) {
if (*argRef == '<')
angle++;
else if (*argRef == '>')
angle--;
argRef++;
}
assert(angle == 0 && "scanToNextArgument - bad protocol type syntax");
}
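// needToScanForQualifiers - Return true if T involves a protocol-qualified
// type (id<P>, a pointer to a qualified interface, or an array of either)
// whose '<...>' must be commented out in the rewritten C code.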
bool RewriteObjC::needToScanForQualifiers(QualType T) {
if (T->isObjCQualifiedIdType())
return true;
if (const PointerType *PT = T->getAs<PointerType>()) {
if (PT->getPointeeType()->isObjCQualifiedIdType())
return true;
}
if (T->isObjCObjectPointerType()) {
T = T->getPointeeType();
return T->isObjCQualifiedInterfaceType();
}
if (T->isArrayType()) {
QualType ElemTy = Context->getBaseElementType(T);
return needToScanForQualifiers(ElemTy);
}
return false;
}
void RewriteObjC::RewriteObjCQualifiedInterfaceTypes(Expr *E) {
QualType Type = E->getType();
if (needToScanForQualifiers(Type)) {
SourceLocation Loc, EndLoc;
if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E)) {
Loc = ECE->getLParenLoc();
EndLoc = ECE->getRParenLoc();
} else {
Loc = E->getLocStart();
EndLoc = E->getLocEnd();
}
// This will defend against trying to rewrite synthesized expressions.
if (Loc.isInvalid() || EndLoc.isInvalid())
return;
const char *startBuf = SM->getCharacterData(Loc);
const char *endBuf = SM->getCharacterData(EndLoc);
const char *startRef = nullptr, *endRef = nullptr;
if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) {
// Get the locations of the startRef, endRef.
SourceLocation LessLoc = Loc.getLocWithOffset(startRef-startBuf);
SourceLocation GreaterLoc = Loc.getLocWithOffset(endRef-startBuf+1);
// Comment out the protocol references.
InsertText(LessLoc, "/*");
InsertText(GreaterLoc, "*/");
}
}
}
void RewriteObjC::RewriteObjCQualifiedInterfaceTypes(Decl *Dcl) {
SourceLocation Loc;
QualType Type;
const FunctionProtoType *proto = nullptr;
if (VarDecl *VD = dyn_cast<VarDecl>(Dcl)) {
Loc = VD->getLocation();
Type = VD->getType();
}
else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Dcl)) {
Loc = FD->getLocation();
// Check for ObjC 'id' and class types that have been adorned with protocol
// information (id<p>, C<p>*). The protocol references need to be rewritten!
const FunctionType *funcType = FD->getType()->getAs<FunctionType>();
assert(funcType && "missing function type");
proto = dyn_cast<FunctionProtoType>(funcType);
if (!proto)
return;
Type = proto->getReturnType();
}
else if (FieldDecl *FD = dyn_cast<FieldDecl>(Dcl)) {
Loc = FD->getLocation();
Type = FD->getType();
}
else
return;
if (needToScanForQualifiers(Type)) {
// Since types are unique, we need to scan the buffer.
const char *endBuf = SM->getCharacterData(Loc);
const char *startBuf = endBuf;
while (*startBuf != ';' && *startBuf != '<' && startBuf != MainFileStart)
startBuf--; // scan backward (from the decl location) for return type.
const char *startRef = nullptr, *endRef = nullptr;
if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) {
// Get the locations of the startRef, endRef.
SourceLocation LessLoc = Loc.getLocWithOffset(startRef-endBuf);
SourceLocation GreaterLoc = Loc.getLocWithOffset(endRef-endBuf+1);
// Comment out the protocol references.
InsertText(LessLoc, "/*");
InsertText(GreaterLoc, "*/");
}
}
if (!proto)
return; // most likely, was a variable
// Now check arguments.
const char *startBuf = SM->getCharacterData(Loc);
const char *startFuncBuf = startBuf;
for (unsigned i = 0; i < proto->getNumParams(); i++) {
if (needToScanForQualifiers(proto->getParamType(i))) {
// Since types are unique, we need to scan the buffer.
const char *endBuf = startBuf;
// scan forward (from the decl location) for argument types.
scanToNextArgument(endBuf);
const char *startRef = nullptr, *endRef = nullptr;
if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) {
// Get the locations of the startRef, endRef.
SourceLocation LessLoc =
Loc.getLocWithOffset(startRef-startFuncBuf);
SourceLocation GreaterLoc =
Loc.getLocWithOffset(endRef-startFuncBuf+1);
// Comment out the protocol references.
InsertText(LessLoc, "/*");
InsertText(GreaterLoc, "*/");
}
startBuf = ++endBuf;
}
else {
// If the function name is derived from a macro expansion, then the
// argument buffer will not follow the name. Need to speak with Chris.
while (*startBuf && *startBuf != ')' && *startBuf != ',')
startBuf++; // scan forward (from the decl location) for argument types.
startBuf++;
}
}
}
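// RewriteTypeOfDecl - Replace a __typeof__(expr) specifier in a variable
// declaration with the expression's concrete type; e.g. when 'a' is an int,
// "__typeof__(a) b = a;" is rewritten as "int b = a;".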
void RewriteObjC::RewriteTypeOfDecl(VarDecl *ND) {
QualType QT = ND->getType();
const Type* TypePtr = QT->getAs<Type>();
if (!isa<TypeOfExprType>(TypePtr))
return;
while (isa<TypeOfExprType>(TypePtr)) {
const TypeOfExprType *TypeOfExprTypePtr = cast<TypeOfExprType>(TypePtr);
QT = TypeOfExprTypePtr->getUnderlyingExpr()->getType();
TypePtr = QT->getAs<Type>();
}
// FIXME. This will not work for multiple declarators; as in:
// __typeof__(a) b,c,d;
std::string TypeAsString(QT.getAsString(Context->getPrintingPolicy()));
SourceLocation DeclLoc = ND->getTypeSpecStartLoc();
const char *startBuf = SM->getCharacterData(DeclLoc);
if (ND->getInit()) {
std::string Name(ND->getNameAsString());
TypeAsString += " " + Name + " = ";
Expr *E = ND->getInit();
SourceLocation startLoc;
if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E))
startLoc = ECE->getLParenLoc();
else
startLoc = E->getLocStart();
startLoc = SM->getExpansionLoc(startLoc);
const char *endBuf = SM->getCharacterData(startLoc);
ReplaceText(DeclLoc, endBuf-startBuf-1, TypeAsString);
}
else {
SourceLocation X = ND->getLocEnd();
X = SM->getExpansionLoc(X);
const char *endBuf = SM->getCharacterData(X);
ReplaceText(DeclLoc, endBuf-startBuf-1, TypeAsString);
}
}
// SynthSelGetUidFunctionDecl - SEL sel_registerName(const char *str);
void RewriteObjC::SynthSelGetUidFunctionDecl() {
IdentifierInfo *SelGetUidIdent = &Context->Idents.get("sel_registerName");
SmallVector<QualType, 16> ArgTys;
ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
QualType getFuncType =
getSimpleFunctionType(Context->getObjCSelType(), ArgTys);
SelGetUidFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
SelGetUidIdent, getFuncType,
nullptr, SC_Extern);
}
void RewriteObjC::RewriteFunctionDecl(FunctionDecl *FD) {
// declared in <objc/objc.h>
if (FD->getIdentifier() &&
FD->getName() == "sel_registerName") {
SelGetUidFunctionDecl = FD;
return;
}
RewriteObjCQualifiedInterfaceTypes(FD);
}
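// RewriteBlockPointerType - Append Type's string form to Str, rewriting any
// block pointer '^' into a function pointer '*'; e.g. "int (^)(int)" becomes
// "int (*)(int)".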
void RewriteObjC::RewriteBlockPointerType(std::string& Str, QualType Type) {
std::string TypeString(Type.getAsString(Context->getPrintingPolicy()));
const char *argPtr = TypeString.c_str();
if (!strchr(argPtr, '^')) {
Str += TypeString;
return;
}
while (*argPtr) {
Str += (*argPtr == '^' ? '*' : *argPtr);
argPtr++;
}
}
// FIXME. Consolidate this routine with RewriteBlockPointerType.
void RewriteObjC::RewriteBlockPointerTypeVariable(std::string& Str,
ValueDecl *VD) {
QualType Type = VD->getType();
std::string TypeString(Type.getAsString(Context->getPrintingPolicy()));
const char *argPtr = TypeString.c_str();
int paren = 0;
while (*argPtr) {
switch (*argPtr) {
case '(':
Str += *argPtr;
paren++;
break;
case ')':
Str += *argPtr;
paren--;
break;
case '^':
Str += '*';
if (paren == 1)
Str += VD->getNameAsString();
break;
default:
Str += *argPtr;
break;
}
argPtr++;
}
}
void RewriteObjC::RewriteBlockLiteralFunctionDecl(FunctionDecl *FD) {
SourceLocation FunLocStart = FD->getTypeSpecStartLoc();
const FunctionType *funcType = FD->getType()->getAs<FunctionType>();
const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(funcType);
if (!proto)
return;
QualType Type = proto->getReturnType();
std::string FdStr = Type.getAsString(Context->getPrintingPolicy());
FdStr += " ";
FdStr += FD->getName();
FdStr += "(";
unsigned numArgs = proto->getNumParams();
for (unsigned i = 0; i < numArgs; i++) {
QualType ArgType = proto->getParamType(i);
RewriteBlockPointerType(FdStr, ArgType);
if (i+1 < numArgs)
FdStr += ", ";
}
FdStr += ");\n";
InsertText(FunLocStart, FdStr);
CurFunctionDeclToDeclareForBlock = nullptr;
}
// SynthSuperConstructorFunctionDecl - id __rw_objc_super(id obj, id super);
void RewriteObjC::SynthSuperConstructorFunctionDecl() {
if (SuperConstructorFunctionDecl)
return;
IdentifierInfo *msgSendIdent = &Context->Idents.get("__rw_objc_super");
SmallVector<QualType, 16> ArgTys;
QualType argT = Context->getObjCIdType();
assert(!argT.isNull() && "Can't find 'id' type");
ArgTys.push_back(argT);
ArgTys.push_back(argT);
QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
ArgTys);
SuperConstructorFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
msgSendIdent, msgSendType,
nullptr, SC_Extern);
}
// SynthMsgSendFunctionDecl - id objc_msgSend(id self, SEL op, ...);
void RewriteObjC::SynthMsgSendFunctionDecl() {
IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend");
SmallVector<QualType, 16> ArgTys;
QualType argT = Context->getObjCIdType();
assert(!argT.isNull() && "Can't find 'id' type");
ArgTys.push_back(argT);
argT = Context->getObjCSelType();
assert(!argT.isNull() && "Can't find 'SEL' type");
ArgTys.push_back(argT);
QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
ArgTys, /*isVariadic=*/true);
MsgSendFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
msgSendIdent, msgSendType,
nullptr, SC_Extern);
}
// SynthMsgSendSuperFunctionDecl - id objc_msgSendSuper(struct objc_super *, SEL op, ...);
void RewriteObjC::SynthMsgSendSuperFunctionDecl() {
IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSendSuper");
SmallVector<QualType, 16> ArgTys;
RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get("objc_super"));
QualType argT = Context->getPointerType(Context->getTagDeclType(RD));
assert(!argT.isNull() && "Can't build 'struct objc_super *' type");
ArgTys.push_back(argT);
argT = Context->getObjCSelType();
assert(!argT.isNull() && "Can't find 'SEL' type");
ArgTys.push_back(argT);
QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
ArgTys, /*isVariadic=*/true);
MsgSendSuperFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
msgSendIdent, msgSendType,
nullptr, SC_Extern);
}
// SynthMsgSendStretFunctionDecl - id objc_msgSend_stret(id self, SEL op, ...);
void RewriteObjC::SynthMsgSendStretFunctionDecl() {
IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend_stret");
SmallVector<QualType, 16> ArgTys;
QualType argT = Context->getObjCIdType();
assert(!argT.isNull() && "Can't find 'id' type");
ArgTys.push_back(argT);
argT = Context->getObjCSelType();
assert(!argT.isNull() && "Can't find 'SEL' type");
ArgTys.push_back(argT);
QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
ArgTys, /*isVariadic=*/true);
MsgSendStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
msgSendIdent, msgSendType,
nullptr, SC_Extern);
}
// SynthMsgSendSuperStretFunctionDecl -
// id objc_msgSendSuper_stret(struct objc_super *, SEL op, ...);
void RewriteObjC::SynthMsgSendSuperStretFunctionDecl() {
IdentifierInfo *msgSendIdent =
&Context->Idents.get("objc_msgSendSuper_stret");
SmallVector<QualType, 16> ArgTys;
RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get("objc_super"));
QualType argT = Context->getPointerType(Context->getTagDeclType(RD));
assert(!argT.isNull() && "Can't build 'struct objc_super *' type");
ArgTys.push_back(argT);
argT = Context->getObjCSelType();
assert(!argT.isNull() && "Can't find 'SEL' type");
ArgTys.push_back(argT);
QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
ArgTys, /*isVariadic=*/true);
MsgSendSuperStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
msgSendIdent,
msgSendType, nullptr,
SC_Extern);
}
// SynthMsgSendFpretFunctionDecl - double objc_msgSend_fpret(id self, SEL op, ...);
void RewriteObjC::SynthMsgSendFpretFunctionDecl() {
IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend_fpret");
SmallVector<QualType, 16> ArgTys;
QualType argT = Context->getObjCIdType();
assert(!argT.isNull() && "Can't find 'id' type");
ArgTys.push_back(argT);
argT = Context->getObjCSelType();
assert(!argT.isNull() && "Can't find 'SEL' type");
ArgTys.push_back(argT);
QualType msgSendType = getSimpleFunctionType(Context->DoubleTy,
ArgTys, /*isVariadic=*/true);
MsgSendFpretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
msgSendIdent, msgSendType,
nullptr, SC_Extern);
}
// SynthGetClassFunctionDecl - id objc_getClass(const char *name);
void RewriteObjC::SynthGetClassFunctionDecl() {
IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getClass");
SmallVector<QualType, 16> ArgTys;
ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
QualType getClassType = getSimpleFunctionType(Context->getObjCIdType(),
ArgTys);
GetClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
getClassIdent, getClassType,
nullptr, SC_Extern);
}
// SynthGetSuperClassFunctionDecl - Class class_getSuperclass(Class cls);
void RewriteObjC::SynthGetSuperClassFunctionDecl() {
IdentifierInfo *getSuperClassIdent =
&Context->Idents.get("class_getSuperclass");
SmallVector<QualType, 16> ArgTys;
ArgTys.push_back(Context->getObjCClassType());
QualType getClassType = getSimpleFunctionType(Context->getObjCClassType(),
ArgTys);
GetSuperClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
getSuperClassIdent,
getClassType, nullptr,
SC_Extern);
}
// SynthGetMetaClassFunctionDecl - id objc_getMetaClass(const char *name);
void RewriteObjC::SynthGetMetaClassFunctionDecl() {
IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getMetaClass");
SmallVector<QualType, 16> ArgTys;
ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
QualType getClassType = getSimpleFunctionType(Context->getObjCIdType(),
ArgTys);
GetMetaClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
getClassIdent, getClassType,
nullptr, SC_Extern);
}
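// RewriteObjCStringLiteral - Rewrite an Objective-C string literal @"..."
// as the address of a static __NSConstantStringImpl instance emitted into
// the preamble; roughly: (NSString *)&__NSConstantStringImpl_<file>_<N>.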
Stmt *RewriteObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) {
assert(Exp != nullptr && "Expected non-null ObjCStringLiteral");
QualType strType = getConstantStringStructType();
std::string S = "__NSConstantStringImpl_";
std::string tmpName = InFileName;
unsigned i;
for (i=0; i < tmpName.length(); i++) {
char c = tmpName.at(i);
// replace any non-alphanumeric characters with '_'.
if (!isAlphanumeric(c))
tmpName[i] = '_';
}
S += tmpName;
S += "_";
S += utostr(NumObjCStringLiterals++);
Preamble += "static __NSConstantStringImpl " + S;
Preamble += " __attribute__ ((section (\"__DATA, __cfstring\"))) = {__CFConstantStringClassReference,";
Preamble += "0x000007c8,"; // utf8_str
// The pretty printer for StringLiteral handles escape characters properly.
std::string prettyBufS;
llvm::raw_string_ostream prettyBuf(prettyBufS);
Exp->getString()->printPretty(prettyBuf, nullptr, PrintingPolicy(LangOpts));
Preamble += prettyBuf.str();
Preamble += ",";
Preamble += utostr(Exp->getString()->getByteLength()) + "};\n";
VarDecl *NewVD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
SourceLocation(), &Context->Idents.get(S),
strType, nullptr, SC_Static);
DeclRefExpr *DRE = new (Context) DeclRefExpr(NewVD, false, strType, VK_LValue,
SourceLocation());
Expr *Unop = new (Context) UnaryOperator(DRE, UO_AddrOf,
Context->getPointerType(DRE->getType()),
VK_RValue, OK_Ordinary,
SourceLocation());
// cast to NSConstantString *
CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Exp->getType(),
CK_CPointerToObjCPointerCast, Unop);
ReplaceStmt(Exp, cast);
// delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
return cast;
}
// struct objc_super { struct objc_object *receiver; struct objc_class *super; };
QualType RewriteObjC::getSuperStructType() {
if (!SuperStructDecl) {
SuperStructDecl = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get("objc_super"));
QualType FieldTypes[2];
// struct objc_object *receiver;
FieldTypes[0] = Context->getObjCIdType();
// struct objc_class *super;
FieldTypes[1] = Context->getObjCClassType();
// Create fields
for (unsigned i = 0; i < 2; ++i) {
SuperStructDecl->addDecl(FieldDecl::Create(*Context, SuperStructDecl,
SourceLocation(),
SourceLocation(), nullptr,
FieldTypes[i], nullptr,
/*BitWidth=*/nullptr,
/*Mutable=*/false,
ICIS_NoInit));
}
SuperStructDecl->completeDefinition();
}
return Context->getTagDeclType(SuperStructDecl);
}
QualType RewriteObjC::getConstantStringStructType() {
if (!ConstantStringDecl) {
ConstantStringDecl = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get("__NSConstantStringImpl"));
QualType FieldTypes[4];
    // id isa; (set to __CFConstantStringClassReference)
FieldTypes[0] = Context->getObjCIdType();
// int flags;
FieldTypes[1] = Context->IntTy;
// char *str;
FieldTypes[2] = Context->getPointerType(Context->CharTy);
// long length;
FieldTypes[3] = Context->LongTy;
// Create fields
for (unsigned i = 0; i < 4; ++i) {
ConstantStringDecl->addDecl(FieldDecl::Create(*Context,
ConstantStringDecl,
SourceLocation(),
SourceLocation(), nullptr,
FieldTypes[i], nullptr,
/*BitWidth=*/nullptr,
/*Mutable=*/true,
ICIS_NoInit));
}
ConstantStringDecl->completeDefinition();
}
return Context->getTagDeclType(ConstantStringDecl);
}
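// SynthMsgSendStretCallExpr - Build the objc_msgSend_stret(...) call used for
// struct-returning messages, casting the callee through 'void *' and then to
// the properly typed function pointer (mirroring SynthMessageExpr below).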
CallExpr *RewriteObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavor,
QualType msgSendType,
QualType returnType,
SmallVectorImpl<QualType> &ArgTypes,
SmallVectorImpl<Expr*> &MsgExprs,
ObjCMethodDecl *Method) {
// Create a reference to the objc_msgSend_stret() declaration.
DeclRefExpr *STDRE = new (Context) DeclRefExpr(MsgSendStretFlavor,
false, msgSendType,
VK_LValue, SourceLocation());
// Need to cast objc_msgSend_stret to "void *" (see above comment).
CastExpr *cast = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(Context->VoidTy),
CK_BitCast, STDRE);
// Now do the "normal" pointer to function cast.
QualType castType = getSimpleFunctionType(returnType, ArgTypes,
Method ? Method->isVariadic()
: false);
castType = Context->getPointerType(castType);
cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
cast);
// Don't forget the parens to enforce the proper binding.
ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), cast);
const FunctionType *FT = msgSendType->getAs<FunctionType>();
CallExpr *STCE = new (Context) CallExpr(
*Context, PE, MsgExprs, FT->getReturnType(), VK_RValue, SourceLocation());
return STCE;
}
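/// SynthMessageExpr - Synthesize the C function call that replaces an
/// Objective-C message send. Roughly, [receiver sel:arg] becomes:
///   ((id (*)(id, SEL, ...))(void *)objc_msgSend)((id)receiver,
///       sel_registerName("sel:"), arg)
/// (the actual cast spells out the argument types), with the
/// objc_msgSendSuper/_stret/_fpret variants chosen by receiver kind and the
/// method's return type.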
Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
SourceLocation StartLoc,
SourceLocation EndLoc) {
if (!SelGetUidFunctionDecl)
SynthSelGetUidFunctionDecl();
if (!MsgSendFunctionDecl)
SynthMsgSendFunctionDecl();
if (!MsgSendSuperFunctionDecl)
SynthMsgSendSuperFunctionDecl();
if (!MsgSendStretFunctionDecl)
SynthMsgSendStretFunctionDecl();
if (!MsgSendSuperStretFunctionDecl)
SynthMsgSendSuperStretFunctionDecl();
if (!MsgSendFpretFunctionDecl)
SynthMsgSendFpretFunctionDecl();
if (!GetClassFunctionDecl)
SynthGetClassFunctionDecl();
if (!GetSuperClassFunctionDecl)
SynthGetSuperClassFunctionDecl();
if (!GetMetaClassFunctionDecl)
SynthGetMetaClassFunctionDecl();
// default to objc_msgSend().
FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl;
// May need to use objc_msgSend_stret() as well.
FunctionDecl *MsgSendStretFlavor = nullptr;
if (ObjCMethodDecl *mDecl = Exp->getMethodDecl()) {
QualType resultType = mDecl->getReturnType();
if (resultType->isRecordType())
MsgSendStretFlavor = MsgSendStretFunctionDecl;
else if (resultType->isRealFloatingType())
MsgSendFlavor = MsgSendFpretFunctionDecl;
}
// Synthesize a call to objc_msgSend().
SmallVector<Expr*, 8> MsgExprs;
switch (Exp->getReceiverKind()) {
case ObjCMessageExpr::SuperClass: {
MsgSendFlavor = MsgSendSuperFunctionDecl;
if (MsgSendStretFlavor)
MsgSendStretFlavor = MsgSendSuperStretFunctionDecl;
assert(MsgSendFlavor && "MsgSendFlavor is NULL!");
ObjCInterfaceDecl *ClassDecl = CurMethodDef->getClassInterface();
SmallVector<Expr*, 4> InitExprs;
// set the receiver to self, the first argument to all methods.
InitExprs.push_back(
NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
CK_BitCast,
new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(),
false,
Context->getObjCIdType(),
VK_RValue,
SourceLocation()))
); // set the 'receiver'.
// (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
SmallVector<Expr*, 8> ClsExprs;
ClsExprs.push_back(getStringLiteral(ClassDecl->getIdentifier()->getName()));
CallExpr *Cls = SynthesizeCallToFunctionDecl(GetMetaClassFunctionDecl,
&ClsExprs[0],
ClsExprs.size(),
StartLoc,
EndLoc);
// (Class)objc_getClass("CurrentClass")
CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context,
Context->getObjCClassType(),
CK_BitCast, Cls);
ClsExprs.clear();
ClsExprs.push_back(ArgExpr);
Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl,
&ClsExprs[0], ClsExprs.size(),
StartLoc, EndLoc);
// (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
// To turn off a warning, type-cast to 'id'
InitExprs.push_back( // set 'super class', using class_getSuperclass().
NoTypeInfoCStyleCastExpr(Context,
Context->getObjCIdType(),
CK_BitCast, Cls));
// struct objc_super
QualType superType = getSuperStructType();
Expr *SuperRep;
if (LangOpts.MicrosoftExt) {
SynthSuperConstructorFunctionDecl();
// Simulate a constructor call...
DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperConstructorFunctionDecl,
false, superType, VK_LValue,
SourceLocation());
SuperRep = new (Context) CallExpr(*Context, DRE, InitExprs,
superType, VK_LValue,
SourceLocation());
      // The code for super is a little tricky to prevent collision with
      // the structure definition in the header. The rewriter has its own
      // internal definition (__rw_objc_super) that it uses. This is why
// we need the cast below. For example:
// (struct objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER"))
//
SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
Context->getPointerType(SuperRep->getType()),
VK_RValue, OK_Ordinary,
SourceLocation());
SuperRep = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(superType),
CK_BitCast, SuperRep);
} else {
// (struct objc_super) { <exprs from above> }
InitListExpr *ILE =
new (Context) InitListExpr(*Context, SourceLocation(), InitExprs,
SourceLocation());
TypeSourceInfo *superTInfo
= Context->getTrivialTypeSourceInfo(superType);
SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superTInfo,
superType, VK_LValue,
ILE, false);
// struct objc_super *
SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
Context->getPointerType(SuperRep->getType()),
VK_RValue, OK_Ordinary,
SourceLocation());
}
MsgExprs.push_back(SuperRep);
break;
}
case ObjCMessageExpr::Class: {
SmallVector<Expr*, 8> ClsExprs;
ObjCInterfaceDecl *Class
= Exp->getClassReceiver()->getAs<ObjCObjectType>()->getInterface();
IdentifierInfo *clsName = Class->getIdentifier();
ClsExprs.push_back(getStringLiteral(clsName->getName()));
CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
&ClsExprs[0],
ClsExprs.size(),
StartLoc, EndLoc);
MsgExprs.push_back(Cls);
break;
}
case ObjCMessageExpr::SuperInstance:{
MsgSendFlavor = MsgSendSuperFunctionDecl;
if (MsgSendStretFlavor)
MsgSendStretFlavor = MsgSendSuperStretFunctionDecl;
assert(MsgSendFlavor && "MsgSendFlavor is NULL!");
ObjCInterfaceDecl *ClassDecl = CurMethodDef->getClassInterface();
SmallVector<Expr*, 4> InitExprs;
InitExprs.push_back(
NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
CK_BitCast,
new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(),
false,
Context->getObjCIdType(),
VK_RValue, SourceLocation()))
); // set the 'receiver'.
// (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
SmallVector<Expr*, 8> ClsExprs;
ClsExprs.push_back(getStringLiteral(ClassDecl->getIdentifier()->getName()));
CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
&ClsExprs[0],
ClsExprs.size(),
StartLoc, EndLoc);
// (Class)objc_getClass("CurrentClass")
CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context,
Context->getObjCClassType(),
CK_BitCast, Cls);
ClsExprs.clear();
ClsExprs.push_back(ArgExpr);
Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl,
&ClsExprs[0], ClsExprs.size(),
StartLoc, EndLoc);
// (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
// To turn off a warning, type-cast to 'id'
InitExprs.push_back(
// set 'super class', using class_getSuperclass().
NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
CK_BitCast, Cls));
// struct objc_super
QualType superType = getSuperStructType();
Expr *SuperRep;
if (LangOpts.MicrosoftExt) {
SynthSuperConstructorFunctionDecl();
// Simulate a constructor call...
DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperConstructorFunctionDecl,
false, superType, VK_LValue,
SourceLocation());
SuperRep = new (Context) CallExpr(*Context, DRE, InitExprs,
superType, VK_LValue, SourceLocation());
      // The code for super is a little tricky to prevent collision with
      // the structure definition in the header. The rewriter has its own
      // internal definition (__rw_objc_super) that it uses. This is why
// we need the cast below. For example:
// (struct objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER"))
//
SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
Context->getPointerType(SuperRep->getType()),
VK_RValue, OK_Ordinary,
SourceLocation());
SuperRep = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(superType),
CK_BitCast, SuperRep);
} else {
// (struct objc_super) { <exprs from above> }
InitListExpr *ILE =
new (Context) InitListExpr(*Context, SourceLocation(), InitExprs,
SourceLocation());
TypeSourceInfo *superTInfo
= Context->getTrivialTypeSourceInfo(superType);
SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superTInfo,
superType, VK_RValue, ILE,
false);
}
MsgExprs.push_back(SuperRep);
break;
}
case ObjCMessageExpr::Instance: {
    // Remove all type-casts because they may contain objc-style types; e.g.
    // Foo<Proto> *.
Expr *recExpr = Exp->getInstanceReceiver();
while (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(recExpr))
recExpr = CE->getSubExpr();
CastKind CK = recExpr->getType()->isObjCObjectPointerType()
? CK_BitCast : recExpr->getType()->isBlockPointerType()
? CK_BlockPointerToObjCPointerCast
: CK_CPointerToObjCPointerCast;
recExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
CK, recExpr);
MsgExprs.push_back(recExpr);
break;
}
}
// Create a call to sel_registerName("selName"), it will be the 2nd argument.
SmallVector<Expr*, 8> SelExprs;
SelExprs.push_back(getStringLiteral(Exp->getSelector().getAsString()));
CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
&SelExprs[0], SelExprs.size(),
StartLoc,
EndLoc);
MsgExprs.push_back(SelExp);
// Now push any user supplied arguments.
for (unsigned i = 0; i < Exp->getNumArgs(); i++) {
Expr *userExpr = Exp->getArg(i);
// Make all implicit casts explicit...ICE comes in handy:-)
if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(userExpr)) {
// Reuse the ICE type, it is exactly what the doctor ordered.
QualType type = ICE->getType();
if (needToScanForQualifiers(type))
type = Context->getObjCIdType();
// Make sure we convert "type (^)(...)" to "type (*)(...)".
(void)convertBlockPointerToFunctionPointer(type);
const Expr *SubExpr = ICE->IgnoreParenImpCasts();
CastKind CK;
if (SubExpr->getType()->isIntegralType(*Context) &&
type->isBooleanType()) {
CK = CK_IntegralToBoolean;
} else if (type->isObjCObjectPointerType()) {
if (SubExpr->getType()->isBlockPointerType()) {
CK = CK_BlockPointerToObjCPointerCast;
} else if (SubExpr->getType()->isPointerType()) {
CK = CK_CPointerToObjCPointerCast;
} else {
CK = CK_BitCast;
}
} else {
CK = CK_BitCast;
}
userExpr = NoTypeInfoCStyleCastExpr(Context, type, CK, userExpr);
}
// Make id<P...> cast into an 'id' cast.
else if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(userExpr)) {
if (CE->getType()->isObjCQualifiedIdType()) {
while ((CE = dyn_cast<CStyleCastExpr>(userExpr)))
userExpr = CE->getSubExpr();
CastKind CK;
if (userExpr->getType()->isIntegralType(*Context)) {
CK = CK_IntegralToPointer;
} else if (userExpr->getType()->isBlockPointerType()) {
CK = CK_BlockPointerToObjCPointerCast;
} else if (userExpr->getType()->isPointerType()) {
CK = CK_CPointerToObjCPointerCast;
} else {
CK = CK_BitCast;
}
userExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
CK, userExpr);
}
}
MsgExprs.push_back(userExpr);
// We've transferred the ownership to MsgExprs. For now, we *don't* null
// out the argument in the original expression (since we aren't deleting
// the ObjCMessageExpr). See RewritePropertyOrImplicitSetter() usage for more info.
//Exp->setArg(i, 0);
}
// Generate the funky cast.
CastExpr *cast;
SmallVector<QualType, 8> ArgTypes;
QualType returnType;
// Push 'id' and 'SEL', the 2 implicit arguments.
if (MsgSendFlavor == MsgSendSuperFunctionDecl)
ArgTypes.push_back(Context->getPointerType(getSuperStructType()));
else
ArgTypes.push_back(Context->getObjCIdType());
ArgTypes.push_back(Context->getObjCSelType());
if (ObjCMethodDecl *OMD = Exp->getMethodDecl()) {
// Push any user argument types.
for (const auto *PI : OMD->params()) {
QualType t = PI->getType()->isObjCQualifiedIdType()
? Context->getObjCIdType()
: PI->getType();
// Make sure we convert "t (^)(...)" to "t (*)(...)".
(void)convertBlockPointerToFunctionPointer(t);
ArgTypes.push_back(t);
}
returnType = Exp->getType();
convertToUnqualifiedObjCType(returnType);
(void)convertBlockPointerToFunctionPointer(returnType);
} else {
returnType = Context->getObjCIdType();
}
// Get the type, we will need to reference it in a couple spots.
QualType msgSendType = MsgSendFlavor->getType();
// Create a reference to the objc_msgSend() declaration.
DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType,
VK_LValue, SourceLocation());
  // Need to cast objc_msgSend to "void *" (to work around a GCC band-aid).
// If we don't do this cast, we get the following bizarre warning/note:
// xx.m:13: warning: function called through a non-compatible type
// xx.m:13: note: if this code is reached, the program will abort
cast = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(Context->VoidTy),
CK_BitCast, DRE);
// Now do the "normal" pointer to function cast.
// If we don't have a method decl, force a variadic cast.
const ObjCMethodDecl *MD = Exp->getMethodDecl();
QualType castType =
getSimpleFunctionType(returnType, ArgTypes, MD ? MD->isVariadic() : true);
castType = Context->getPointerType(castType);
cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
cast);
// Don't forget the parens to enforce the proper binding.
ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
const FunctionType *FT = msgSendType->getAs<FunctionType>();
CallExpr *CE = new (Context)
CallExpr(*Context, PE, MsgExprs, FT->getReturnType(), VK_RValue, EndLoc);
Stmt *ReplacingStmt = CE;
if (MsgSendStretFlavor) {
    // We have a method that returns a struct/union. We must also generate a
    // call to objc_msgSend_stret and hang both varieties on a conditional
    // expression that dictates which one to invoke depending on the size of
    // the method's return type.
CallExpr *STCE = SynthMsgSendStretCallExpr(MsgSendStretFlavor,
msgSendType, returnType,
ArgTypes, MsgExprs,
Exp->getMethodDecl());
// Build sizeof(returnType)
UnaryExprOrTypeTraitExpr *sizeofExpr =
new (Context) UnaryExprOrTypeTraitExpr(UETT_SizeOf,
Context->getTrivialTypeSourceInfo(returnType),
Context->getSizeType(), SourceLocation(),
SourceLocation());
// (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...))
    // FIXME: The value of 8 is based on the ppc32/x86 ABI for the most common
    // cases. For x86 it is more complicated, and some kind of target-specific
    // routine is needed to decide what to do.
unsigned IntSize =
static_cast<unsigned>(Context->getTypeSize(Context->IntTy));
IntegerLiteral *limit = IntegerLiteral::Create(*Context,
llvm::APInt(IntSize, 8),
Context->IntTy,
SourceLocation());
BinaryOperator *lessThanExpr =
new (Context) BinaryOperator(sizeofExpr, limit, BO_LE, Context->IntTy,
VK_RValue, OK_Ordinary, SourceLocation(),
false);
// (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...))
ConditionalOperator *CondExpr =
new (Context) ConditionalOperator(lessThanExpr,
SourceLocation(), CE,
SourceLocation(), STCE,
returnType, VK_RValue, OK_Ordinary);
ReplacingStmt = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
CondExpr);
}
// delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
return ReplacingStmt;
}
Stmt *RewriteObjC::RewriteMessageExpr(ObjCMessageExpr *Exp) {
Stmt *ReplacingStmt = SynthMessageExpr(Exp, Exp->getLocStart(),
Exp->getLocEnd());
// Now do the actual rewrite.
ReplaceStmt(Exp, ReplacingStmt);
// delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
return ReplacingStmt;
}
// typedef struct objc_object Protocol;
QualType RewriteObjC::getProtocolType() {
if (!ProtocolTypeDecl) {
TypeSourceInfo *TInfo
= Context->getTrivialTypeSourceInfo(Context->getObjCIdType());
ProtocolTypeDecl = TypedefDecl::Create(*Context, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get("Protocol"),
TInfo);
}
return Context->getTypeDeclType(ProtocolTypeDecl);
}
/// RewriteObjCProtocolExpr - Rewrite a protocol expression into
/// a synthesized/forward data reference (to the protocol's metadata).
/// The forward references (and metadata) are generated in
/// RewriteObjC::HandleTranslationUnit().
Stmt *RewriteObjC::RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp) {
std::string Name = "_OBJC_PROTOCOL_" + Exp->getProtocol()->getNameAsString();
IdentifierInfo *ID = &Context->Idents.get(Name);
VarDecl *VD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
SourceLocation(), ID, getProtocolType(),
nullptr, SC_Extern);
DeclRefExpr *DRE = new (Context) DeclRefExpr(VD, false, getProtocolType(),
VK_LValue, SourceLocation());
Expr *DerefExpr = new (Context) UnaryOperator(DRE, UO_AddrOf,
Context->getPointerType(DRE->getType()),
VK_RValue, OK_Ordinary, SourceLocation());
CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, DerefExpr->getType(),
CK_BitCast,
DerefExpr);
ReplaceStmt(Exp, castExpr);
ProtocolExprDecls.insert(Exp->getProtocol()->getCanonicalDecl());
// delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
return castExpr;
}
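// BufferContainsPPDirectives - Return true if [startBuf, endBuf) contains a
// preprocessor directive (#if, #define, #include, etc.).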
bool RewriteObjC::BufferContainsPPDirectives(const char *startBuf,
const char *endBuf) {
while (startBuf < endBuf) {
if (*startBuf == '#') {
// Skip whitespace.
for (++startBuf; startBuf[0] == ' ' || startBuf[0] == '\t'; ++startBuf)
;
if (!strncmp(startBuf, "if", strlen("if")) ||
!strncmp(startBuf, "ifdef", strlen("ifdef")) ||
!strncmp(startBuf, "ifndef", strlen("ifndef")) ||
!strncmp(startBuf, "define", strlen("define")) ||
!strncmp(startBuf, "undef", strlen("undef")) ||
!strncmp(startBuf, "else", strlen("else")) ||
!strncmp(startBuf, "elif", strlen("elif")) ||
!strncmp(startBuf, "endif", strlen("endif")) ||
!strncmp(startBuf, "pragma", strlen("pragma")) ||
!strncmp(startBuf, "include", strlen("include")) ||
!strncmp(startBuf, "import", strlen("import")) ||
!strncmp(startBuf, "include_next", strlen("include_next")))
return true;
}
startBuf++;
}
return false;
}
/// RewriteObjCInternalStruct - Rewrite one internal struct corresponding to
/// an objective-c class with ivars.
void RewriteObjC::RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl,
std::string &Result) {
assert(CDecl && "Class missing in SynthesizeObjCInternalStruct");
assert(CDecl->getName() != "" &&
"Name missing in SynthesizeObjCInternalStruct");
// Do not synthesize more than once.
if (ObjCSynthesizedStructs.count(CDecl))
return;
ObjCInterfaceDecl *RCDecl = CDecl->getSuperClass();
int NumIvars = CDecl->ivar_size();
SourceLocation LocStart = CDecl->getLocStart();
SourceLocation LocEnd = CDecl->getEndOfDefinitionLoc();
const char *startBuf = SM->getCharacterData(LocStart);
const char *endBuf = SM->getCharacterData(LocEnd);
  // If this class has no ivars (or is only a forward declaration) and its
  // superclass chain was not synthesized (i.e., no ancestor has ivars), then
  // there is no need to synthesize this class.
if ((!CDecl->isThisDeclarationADefinition() || NumIvars == 0) &&
(!RCDecl || !ObjCSynthesizedStructs.count(RCDecl))) {
endBuf += Lexer::MeasureTokenLength(LocEnd, *SM, LangOpts);
ReplaceText(LocStart, endBuf-startBuf, Result);
return;
}
  // FIXME: This has the potential of causing problems if
  // SynthesizeObjCInternalStruct is ever called recursively.
Result += "\nstruct ";
Result += CDecl->getNameAsString();
if (LangOpts.MicrosoftExt)
Result += "_IMPL";
if (NumIvars > 0) {
const char *cursor = strchr(startBuf, '{');
assert((cursor && endBuf)
&& "SynthesizeObjCInternalStruct - malformed @interface");
// If the buffer contains preprocessor directives, we do more fine-grained
// rewrites. This is intended to fix code that looks like (which occurs in
// NSURL.h, for example):
//
// #ifdef XYZ
// @interface Foo : NSObject
// #else
// @interface FooBar : NSObject
// #endif
// {
// int i;
// }
// @end
//
// This clause is segregated to avoid breaking the common case.
if (BufferContainsPPDirectives(startBuf, cursor)) {
SourceLocation L = RCDecl ? CDecl->getSuperClassLoc() :
CDecl->getAtStartLoc();
const char *endHeader = SM->getCharacterData(L);
endHeader += Lexer::MeasureTokenLength(L, *SM, LangOpts);
if (CDecl->protocol_begin() != CDecl->protocol_end()) {
// advance to the end of the referenced protocols.
while (endHeader < cursor && *endHeader != '>') endHeader++;
endHeader++;
}
// rewrite the original header
ReplaceText(LocStart, endHeader-startBuf, Result);
} else {
// rewrite the original header *without* disturbing the '{'
ReplaceText(LocStart, cursor-startBuf, Result);
}
if (RCDecl && ObjCSynthesizedStructs.count(RCDecl)) {
Result = "\n struct ";
Result += RCDecl->getNameAsString();
Result += "_IMPL ";
Result += RCDecl->getNameAsString();
Result += "_IVARS;\n";
// insert the super class structure definition.
SourceLocation OnePastCurly =
LocStart.getLocWithOffset(cursor-startBuf+1);
InsertText(OnePastCurly, Result);
}
cursor++; // past '{'
// Now comment out any visibility specifiers.
while (cursor < endBuf) {
if (*cursor == '@') {
SourceLocation atLoc = LocStart.getLocWithOffset(cursor-startBuf);
// Skip whitespace.
for (++cursor; cursor[0] == ' ' || cursor[0] == '\t'; ++cursor)
/*scan*/;
      // FIXME: the presence of @public, etc. inside a comment results in
      // this transformation as well, which still yields correct C code.
if (!strncmp(cursor, "public", strlen("public")) ||
!strncmp(cursor, "private", strlen("private")) ||
!strncmp(cursor, "package", strlen("package")) ||
!strncmp(cursor, "protected", strlen("protected")))
InsertText(atLoc, "// ");
}
// FIXME: If there are cases where '<' is used in ivar declaration part
// of user code, then scan the ivar list and use needToScanForQualifiers
// for type checking.
else if (*cursor == '<') {
SourceLocation atLoc = LocStart.getLocWithOffset(cursor-startBuf);
InsertText(atLoc, "/* ");
cursor = strchr(cursor, '>');
cursor++;
atLoc = LocStart.getLocWithOffset(cursor-startBuf);
InsertText(atLoc, " */");
} else if (*cursor == '^') { // rewrite block specifier.
SourceLocation caretLoc = LocStart.getLocWithOffset(cursor-startBuf);
ReplaceText(caretLoc, 1, "*");
}
cursor++;
}
// Don't forget to add a ';'!!
InsertText(LocEnd.getLocWithOffset(1), ";");
} else { // we don't have any instance variables - insert super struct.
endBuf += Lexer::MeasureTokenLength(LocEnd, *SM, LangOpts);
Result += " {\n struct ";
Result += RCDecl->getNameAsString();
Result += "_IMPL ";
Result += RCDecl->getNameAsString();
Result += "_IVARS;\n};\n";
ReplaceText(LocStart, endBuf-startBuf, Result);
}
// Mark this struct as having been generated.
if (!ObjCSynthesizedStructs.insert(CDecl).second)
    llvm_unreachable("struct already synthesized - SynthesizeObjCInternalStruct");
}
//===----------------------------------------------------------------------===//
// Meta Data Emission
//===----------------------------------------------------------------------===//
/// RewriteImplementations - This routine rewrites all method implementations
/// and emits meta-data.
void RewriteObjC::RewriteImplementations() {
int ClsDefCount = ClassImplementation.size();
int CatDefCount = CategoryImplementation.size();
// Rewrite implemented methods
for (int i = 0; i < ClsDefCount; i++)
RewriteImplementationDecl(ClassImplementation[i]);
for (int i = 0; i < CatDefCount; i++)
RewriteImplementationDecl(CategoryImplementation[i]);
}
void RewriteObjC::RewriteByRefString(std::string &ResultStr,
const std::string &Name,
ValueDecl *VD, bool def) {
assert(BlockByRefDeclNo.count(VD) &&
"RewriteByRefString: ByRef decl missing");
if (def)
ResultStr += "struct ";
ResultStr += "__Block_byref_" + Name +
"_" + utostr(BlockByRefDeclNo[VD]) ;
}
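// HasLocalVariableExternalStorage - Returns true for variables that are
// declared inside a function or method but have external storage (e.g.
// "extern int x;" within a function body). Such variables are captured by
// address and later rewritten as "(*x)".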
static bool HasLocalVariableExternalStorage(ValueDecl *VD) {
if (VarDecl *Var = dyn_cast<VarDecl>(VD))
return (Var->isFunctionOrMethodVarDecl() && !Var->hasLocalStorage());
return false;
}
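/// SynthesizeBlockFunc - Synthesize the static function that implements the
/// body of block number i within funcName. Roughly, for a block of type
/// "RT (^)(params)" whose captures live in struct Tag, this emits:
///
///   static RT __funcName_block_func_i(struct Tag *__cself, params) {
///     <locals re-binding the by-ref and by-copy captures from __cself>
///     <original block body>
///   }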
std::string RewriteObjC::SynthesizeBlockFunc(BlockExpr *CE, int i,
StringRef funcName,
std::string Tag) {
const FunctionType *AFT = CE->getFunctionType();
QualType RT = AFT->getReturnType();
std::string StructRef = "struct " + Tag;
std::string S = "static " + RT.getAsString(Context->getPrintingPolicy()) + " __" +
funcName.str() + "_" + "block_func_" + utostr(i);
BlockDecl *BD = CE->getBlockDecl();
if (isa<FunctionNoProtoType>(AFT)) {
// No user-supplied arguments. Still need to pass in a pointer to the
// block (to reference imported block decl refs).
S += "(" + StructRef + " *__cself)";
} else if (BD->param_empty()) {
S += "(" + StructRef + " *__cself)";
} else {
const FunctionProtoType *FT = cast<FunctionProtoType>(AFT);
assert(FT && "SynthesizeBlockFunc: No function proto");
S += '(';
// first add the implicit argument.
S += StructRef + " *__cself, ";
std::string ParamStr;
for (BlockDecl::param_iterator AI = BD->param_begin(),
E = BD->param_end(); AI != E; ++AI) {
if (AI != BD->param_begin()) S += ", ";
ParamStr = (*AI)->getNameAsString();
QualType QT = (*AI)->getType();
(void)convertBlockPointerToFunctionPointer(QT);
QT.getAsStringInternal(ParamStr, Context->getPrintingPolicy());
S += ParamStr;
}
if (FT->isVariadic()) {
if (!BD->param_empty()) S += ", ";
S += "...";
}
S += ')';
}
S += " {\n";
// Create local declarations to avoid rewriting all closure decl ref exprs.
// First, emit a declaration for all "by ref" decls.
for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByRefDecls.begin(),
E = BlockByRefDecls.end(); I != E; ++I) {
S += " ";
std::string Name = (*I)->getNameAsString();
std::string TypeString;
RewriteByRefString(TypeString, Name, (*I));
TypeString += " *";
Name = TypeString + Name;
S += Name + " = __cself->" + (*I)->getNameAsString() + "; // bound by ref\n";
}
// Next, emit a declaration for all "by copy" declarations.
for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByCopyDecls.begin(),
E = BlockByCopyDecls.end(); I != E; ++I) {
S += " ";
// Handle nested closure invocation. For example:
//
// void (^myImportedClosure)(void);
// myImportedClosure = ^(void) { setGlobalInt(x + y); };
//
// void (^anotherClosure)(void);
// anotherClosure = ^(void) {
// myImportedClosure(); // import and invoke the closure
// };
//
if (isTopLevelBlockPointerType((*I)->getType())) {
RewriteBlockPointerTypeVariable(S, (*I));
S += " = (";
RewriteBlockPointerType(S, (*I)->getType());
S += ")";
S += "__cself->" + (*I)->getNameAsString() + "; // bound by copy\n";
}
else {
std::string Name = (*I)->getNameAsString();
QualType QT = (*I)->getType();
if (HasLocalVariableExternalStorage(*I))
QT = Context->getPointerType(QT);
QT.getAsStringInternal(Name, Context->getPrintingPolicy());
S += Name + " = __cself->" +
(*I)->getNameAsString() + "; // bound by copy\n";
}
}
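  // Emit the rewritten block body, skipping everything up to and including
  // its opening '{'.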
std::string RewrittenStr = RewrittenBlockExprs[CE];
const char *cstr = RewrittenStr.c_str();
while (*cstr++ != '{') ;
S += cstr;
S += "\n";
return S;
}
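/// SynthesizeBlockHelperFuncs - Synthesize the copy and dispose helpers for
/// block number i, which call _Block_object_assign/_Block_object_dispose on
/// each imported object. Roughly:
///
///   static void __funcName_block_copy_i(struct Tag *dst, struct Tag *src) {
///     _Block_object_assign((void*)&dst->x, (void*)src->x, <field flags>);
///   }
///   static void __funcName_block_dispose_i(struct Tag *src) {
///     _Block_object_dispose((void*)src->x, <field flags>);
///   }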
std::string RewriteObjC::SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
StringRef funcName,
std::string Tag) {
std::string StructRef = "struct " + Tag;
std::string S = "static void __";
S += funcName;
S += "_block_copy_" + utostr(i);
S += "(" + StructRef;
S += "*dst, " + StructRef;
S += "*src) {";
for (ValueDecl *VD : ImportedBlockDecls) {
S += "_Block_object_assign((void*)&dst->";
S += VD->getNameAsString();
S += ", (void*)src->";
S += VD->getNameAsString();
if (BlockByRefDeclsPtrSet.count(VD))
S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);";
else if (VD->getType()->isBlockPointerType())
S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);";
else
S += ", " + utostr(BLOCK_FIELD_IS_OBJECT) + "/*BLOCK_FIELD_IS_OBJECT*/);";
}
S += "}\n";
S += "\nstatic void __";
S += funcName;
S += "_block_dispose_" + utostr(i);
S += "(" + StructRef;
S += "*src) {";
for (ValueDecl *VD : ImportedBlockDecls) {
S += "_Block_object_dispose((void*)src->";
S += VD->getNameAsString();
if (BlockByRefDeclsPtrSet.count(VD))
S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);";
else if (VD->getType()->isBlockPointerType())
S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);";
else
S += ", " + utostr(BLOCK_FIELD_IS_OBJECT) + "/*BLOCK_FIELD_IS_OBJECT*/);";
}
S += "}\n";
return S;
}
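/// SynthesizeBlockImpl - Synthesize the "impl" struct for a block literal,
/// with a field for each captured variable and a C++ constructor that
/// initializes them. Roughly:
///
///   struct Tag {
///     struct __block_impl impl;
///     struct Desc *Desc;
///     <by-copy and by-ref capture fields>
///     Tag(void *fp, struct Desc *desc, <capture args>, int flags=0) { ... }
///   };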
std::string RewriteObjC::SynthesizeBlockImpl(BlockExpr *CE, std::string Tag,
std::string Desc) {
std::string S = "\nstruct " + Tag;
std::string Constructor = " " + Tag;
S += " {\n struct __block_impl impl;\n";
S += " struct " + Desc;
S += "* Desc;\n";
Constructor += "(void *fp, "; // Invoke function pointer.
Constructor += "struct " + Desc; // Descriptor pointer.
Constructor += " *desc";
if (BlockDeclRefs.size()) {
// Output all "by copy" declarations.
for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByCopyDecls.begin(),
E = BlockByCopyDecls.end(); I != E; ++I) {
S += " ";
std::string FieldName = (*I)->getNameAsString();
std::string ArgName = "_" + FieldName;
// Handle nested closure invocation. For example:
//
// void (^myImportedBlock)(void);
// myImportedBlock = ^(void) { setGlobalInt(x + y); };
//
// void (^anotherBlock)(void);
// anotherBlock = ^(void) {
// myImportedBlock(); // import and invoke the closure
// };
//
if (isTopLevelBlockPointerType((*I)->getType())) {
S += "struct __block_impl *";
Constructor += ", void *" + ArgName;
} else {
QualType QT = (*I)->getType();
if (HasLocalVariableExternalStorage(*I))
QT = Context->getPointerType(QT);
QT.getAsStringInternal(FieldName, Context->getPrintingPolicy());
QT.getAsStringInternal(ArgName, Context->getPrintingPolicy());
Constructor += ", " + ArgName;
}
S += FieldName + ";\n";
}
// Output all "by ref" declarations.
for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByRefDecls.begin(),
E = BlockByRefDecls.end(); I != E; ++I) {
S += " ";
std::string FieldName = (*I)->getNameAsString();
std::string ArgName = "_" + FieldName;
{
std::string TypeString;
RewriteByRefString(TypeString, FieldName, (*I));
TypeString += " *";
FieldName = TypeString + FieldName;
ArgName = TypeString + ArgName;
Constructor += ", " + ArgName;
}
S += FieldName + "; // by ref\n";
}
// Finish writing the constructor.
Constructor += ", int flags=0)";
// Initialize all "by copy" arguments.
    bool firstTime = true;
for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByCopyDecls.begin(),
E = BlockByCopyDecls.end(); I != E; ++I) {
std::string Name = (*I)->getNameAsString();
      if (firstTime) {
        Constructor += " : ";
        firstTime = false;
}
else
Constructor += ", ";
if (isTopLevelBlockPointerType((*I)->getType()))
Constructor += Name + "((struct __block_impl *)_" + Name + ")";
else
Constructor += Name + "(_" + Name + ")";
}
// Initialize all "by ref" arguments.
for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByRefDecls.begin(),
E = BlockByRefDecls.end(); I != E; ++I) {
std::string Name = (*I)->getNameAsString();
      if (firstTime) {
        Constructor += " : ";
        firstTime = false;
}
else
Constructor += ", ";
Constructor += Name + "(_" + Name + "->__forwarding)";
}
Constructor += " {\n";
if (GlobalVarDecl)
Constructor += " impl.isa = &_NSConcreteGlobalBlock;\n";
else
Constructor += " impl.isa = &_NSConcreteStackBlock;\n";
Constructor += " impl.Flags = flags;\n impl.FuncPtr = fp;\n";
Constructor += " Desc = desc;\n";
} else {
// Finish writing the constructor.
Constructor += ", int flags=0) {\n";
if (GlobalVarDecl)
Constructor += " impl.isa = &_NSConcreteGlobalBlock;\n";
else
Constructor += " impl.isa = &_NSConcreteStackBlock;\n";
Constructor += " impl.Flags = flags;\n impl.FuncPtr = fp;\n";
Constructor += " Desc = desc;\n";
}
Constructor += " ";
Constructor += "}\n";
S += Constructor;
S += "};\n";
return S;
}
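/// SynthesizeBlockDescriptor - Synthesize the static descriptor instance for
/// block number i. Roughly:
///
///   static struct DescTag {
///     unsigned long reserved;
///     unsigned long Block_size;
///     <copy/dispose function pointers, if hasCopy>
///   } DescTag_DATA = { 0, sizeof(struct ImplTag) <, copy, dispose> };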
std::string RewriteObjC::SynthesizeBlockDescriptor(std::string DescTag,
std::string ImplTag, int i,
StringRef FunName,
unsigned hasCopy) {
std::string S = "\nstatic struct " + DescTag;
S += " {\n unsigned long reserved;\n";
S += " unsigned long Block_size;\n";
if (hasCopy) {
S += " void (*copy)(struct ";
S += ImplTag; S += "*, struct ";
S += ImplTag; S += "*);\n";
S += " void (*dispose)(struct ";
S += ImplTag; S += "*);\n";
}
S += "} ";
S += DescTag + "_DATA = { 0, sizeof(struct ";
S += ImplTag + ")";
if (hasCopy) {
S += ", __" + FunName.str() + "_block_copy_" + utostr(i);
S += ", __" + FunName.str() + "_block_dispose_" + utostr(i);
}
S += "};\n";
return S;
}
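/// SynthesizeBlockLiterals - For every block literal collected in Blocks,
/// insert its impl struct, block function, any copy/dispose helpers, and its
/// descriptor at FunLocStart, then reset the per-function block state.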
void RewriteObjC::SynthesizeBlockLiterals(SourceLocation FunLocStart,
StringRef FunName) {
  // Insert a declaration for the function in which the block literal is used.
if (CurFunctionDeclToDeclareForBlock && !Blocks.empty())
RewriteBlockLiteralFunctionDecl(CurFunctionDeclToDeclareForBlock);
bool RewriteSC = (GlobalVarDecl &&
!Blocks.empty() &&
GlobalVarDecl->getStorageClass() == SC_Static &&
GlobalVarDecl->getType().getCVRQualifiers());
if (RewriteSC) {
std::string SC(" void __");
SC += GlobalVarDecl->getNameAsString();
SC += "() {}";
InsertText(FunLocStart, SC);
}
// Insert closures that were part of the function.
for (unsigned i = 0, count=0; i < Blocks.size(); i++) {
CollectBlockDeclRefInfo(Blocks[i]);
    // Variables copied into inner blocks must also be copied into this
    // block, even when this block does not use them directly.
for (int j = 0; j < InnerDeclRefsCount[i]; j++) {
DeclRefExpr *Exp = InnerDeclRefs[count++];
ValueDecl *VD = Exp->getDecl();
BlockDeclRefs.push_back(Exp);
if (!VD->hasAttr<BlocksAttr>() && !BlockByCopyDeclsPtrSet.count(VD)) {
BlockByCopyDeclsPtrSet.insert(VD);
BlockByCopyDecls.push_back(VD);
}
if (VD->hasAttr<BlocksAttr>() && !BlockByRefDeclsPtrSet.count(VD)) {
BlockByRefDeclsPtrSet.insert(VD);
BlockByRefDecls.push_back(VD);
}
      // Imported objects in the inner blocks that are not used in the outer
      // block must be copied/disposed of in the outer block as well.
if (VD->hasAttr<BlocksAttr>() ||
VD->getType()->isObjCObjectPointerType() ||
VD->getType()->isBlockPointerType())
ImportedBlockDecls.insert(VD);
}
std::string ImplTag = "__" + FunName.str() + "_block_impl_" + utostr(i);
std::string DescTag = "__" + FunName.str() + "_block_desc_" + utostr(i);
std::string CI = SynthesizeBlockImpl(Blocks[i], ImplTag, DescTag);
InsertText(FunLocStart, CI);
std::string CF = SynthesizeBlockFunc(Blocks[i], i, FunName, ImplTag);
InsertText(FunLocStart, CF);
if (ImportedBlockDecls.size()) {
std::string HF = SynthesizeBlockHelperFuncs(Blocks[i], i, FunName, ImplTag);
InsertText(FunLocStart, HF);
}
std::string BD = SynthesizeBlockDescriptor(DescTag, ImplTag, i, FunName,
ImportedBlockDecls.size() > 0);
InsertText(FunLocStart, BD);
BlockDeclRefs.clear();
BlockByRefDecls.clear();
BlockByRefDeclsPtrSet.clear();
BlockByCopyDecls.clear();
BlockByCopyDeclsPtrSet.clear();
ImportedBlockDecls.clear();
}
if (RewriteSC) {
    // Must insert any 'const'/'volatile'/'static' qualifiers here, since they
    // have been removed as a result of rewriting the block literals.
std::string SC;
if (GlobalVarDecl->getStorageClass() == SC_Static)
SC = "static ";
if (GlobalVarDecl->getType().isConstQualified())
SC += "const ";
if (GlobalVarDecl->getType().isVolatileQualified())
SC += "volatile ";
if (GlobalVarDecl->getType().isRestrictQualified())
SC += "restrict ";
InsertText(FunLocStart, SC);
}
Blocks.clear();
InnerDeclRefsCount.clear();
InnerDeclRefs.clear();
RewrittenBlockExprs.clear();
}
void RewriteObjC::InsertBlockLiteralsWithinFunction(FunctionDecl *FD) {
SourceLocation FunLocStart = FD->getTypeSpecStartLoc();
StringRef FuncName = FD->getName();
SynthesizeBlockLiterals(FunLocStart, FuncName);
}
static void BuildUniqueMethodName(std::string &Name,
ObjCMethodDecl *MD) {
ObjCInterfaceDecl *IFace = MD->getClassInterface();
Name = IFace->getName();
Name += "__" + MD->getSelector().getAsString();
// Convert colons to underscores.
std::string::size_type loc = 0;
while ((loc = Name.find(":", loc)) != std::string::npos)
Name.replace(loc, 1, "_");
}
void RewriteObjC::InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD) {
//fprintf(stderr,"In InsertBlockLiteralsWitinMethod\n");
//SourceLocation FunLocStart = MD->getLocStart();
SourceLocation FunLocStart = MD->getLocStart();
std::string FuncName;
BuildUniqueMethodName(FuncName, MD);
SynthesizeBlockLiterals(FunLocStart, FuncName);
}
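/// GetBlockDeclRefExprs - Recursively collect, into BlockDeclRefs, every
/// DeclRefExpr in S that refers to a variable captured from an enclosing
/// scope or to a local variable with external storage.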
void RewriteObjC::GetBlockDeclRefExprs(Stmt *S) {
for (Stmt *SubStmt : S->children())
if (SubStmt) {
if (BlockExpr *CBE = dyn_cast<BlockExpr>(SubStmt))
GetBlockDeclRefExprs(CBE->getBody());
else
GetBlockDeclRefExprs(SubStmt);
}
// Handle specific things.
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S))
if (DRE->refersToEnclosingVariableOrCapture() ||
HasLocalVariableExternalStorage(DRE->getDecl()))
// FIXME: Handle enums.
BlockDeclRefs.push_back(DRE);
return;
}
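/// GetInnerBlockDeclRefExprs - Like GetBlockDeclRefExprs, but collects the
/// captured references found within nested blocks, skipping any declaration
/// that belongs to one of the inner block contexts themselves.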
void RewriteObjC::GetInnerBlockDeclRefExprs(Stmt *S,
SmallVectorImpl<DeclRefExpr *> &InnerBlockDeclRefs,
llvm::SmallPtrSetImpl<const DeclContext *> &InnerContexts) {
for (Stmt *SubStmt : S->children())
if (SubStmt) {
if (BlockExpr *CBE = dyn_cast<BlockExpr>(SubStmt)) {
InnerContexts.insert(cast<DeclContext>(CBE->getBlockDecl()));
GetInnerBlockDeclRefExprs(CBE->getBody(),
InnerBlockDeclRefs,
InnerContexts);
}
else
GetInnerBlockDeclRefExprs(SubStmt, InnerBlockDeclRefs, InnerContexts);
}
// Handle specific things.
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S)) {
if (DRE->refersToEnclosingVariableOrCapture() ||
HasLocalVariableExternalStorage(DRE->getDecl())) {
if (!InnerContexts.count(DRE->getDecl()->getDeclContext()))
InnerBlockDeclRefs.push_back(DRE);
if (VarDecl *Var = cast<VarDecl>(DRE->getDecl()))
if (Var->isFunctionOrMethodVarDecl())
ImportedLocalExternalDecls.insert(Var);
}
}
return;
}
/// convertFunctionTypeOfBlocks - This routine converts a function type
/// whose result type may be a block pointer or whose argument type(s)
/// might be block pointers to an equivalent function type replacing
/// all block pointers to function pointers.
QualType RewriteObjC::convertFunctionTypeOfBlocks(const FunctionType *FT) {
const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FT);
// FTP will be null for closures that don't take arguments.
// Generate a funky cast.
SmallVector<QualType, 8> ArgTypes;
QualType Res = FT->getReturnType();
bool HasBlockType = convertBlockPointerToFunctionPointer(Res);
if (FTP) {
for (auto &I : FTP->param_types()) {
QualType t = I;
// Make sure we convert "t (^)(...)" to "t (*)(...)".
if (convertBlockPointerToFunctionPointer(t))
HasBlockType = true;
ArgTypes.push_back(t);
}
}
QualType FuncType;
  // FIXME: Does this work if the block takes no arguments but has a return
  // type that is a block pointer type?
if (HasBlockType)
FuncType = getSimpleFunctionType(Res, ArgTypes);
else FuncType = QualType(FT, 0);
return FuncType;
}
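/// SynthesizeBlockCall - Rewrite a call through a block pointer into a call
/// through the block's FuncPtr field. Conceptually, "block(args)" becomes:
///
///   ((RT (*)(struct __block_impl *, ArgTypes...))
///     ((struct __block_impl *)block)->FuncPtr)((struct __block_impl *)block,
///                                              args)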
Stmt *RewriteObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) {
// Navigate to relevant type information.
const BlockPointerType *CPT = nullptr;
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BlockExp)) {
CPT = DRE->getType()->getAs<BlockPointerType>();
} else if (const MemberExpr *MExpr = dyn_cast<MemberExpr>(BlockExp)) {
CPT = MExpr->getType()->getAs<BlockPointerType>();
}
else if (const ParenExpr *PRE = dyn_cast<ParenExpr>(BlockExp)) {
return SynthesizeBlockCall(Exp, PRE->getSubExpr());
}
else if (const ImplicitCastExpr *IEXPR = dyn_cast<ImplicitCastExpr>(BlockExp))
CPT = IEXPR->getType()->getAs<BlockPointerType>();
else if (const ConditionalOperator *CEXPR =
dyn_cast<ConditionalOperator>(BlockExp)) {
Expr *LHSExp = CEXPR->getLHS();
Stmt *LHSStmt = SynthesizeBlockCall(Exp, LHSExp);
Expr *RHSExp = CEXPR->getRHS();
Stmt *RHSStmt = SynthesizeBlockCall(Exp, RHSExp);
Expr *CONDExp = CEXPR->getCond();
ConditionalOperator *CondExpr =
new (Context) ConditionalOperator(CONDExp,
SourceLocation(), cast<Expr>(LHSStmt),
SourceLocation(), cast<Expr>(RHSStmt),
Exp->getType(), VK_RValue, OK_Ordinary);
return CondExpr;
} else if (const ObjCIvarRefExpr *IRE = dyn_cast<ObjCIvarRefExpr>(BlockExp)) {
CPT = IRE->getType()->getAs<BlockPointerType>();
} else if (const PseudoObjectExpr *POE
= dyn_cast<PseudoObjectExpr>(BlockExp)) {
CPT = POE->getType()->castAs<BlockPointerType>();
} else {
assert(1 && "RewriteBlockClass: Bad type");
}
assert(CPT && "RewriteBlockClass: Bad type");
const FunctionType *FT = CPT->getPointeeType()->getAs<FunctionType>();
assert(FT && "RewriteBlockClass: Bad type");
const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FT);
// FTP will be null for closures that don't take arguments.
RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get("__block_impl"));
QualType PtrBlock = Context->getPointerType(Context->getTagDeclType(RD));
// Generate a funky cast.
SmallVector<QualType, 8> ArgTypes;
// Push the block argument type.
ArgTypes.push_back(PtrBlock);
if (FTP) {
for (auto &I : FTP->param_types()) {
QualType t = I;
// Make sure we convert "t (^)(...)" to "t (*)(...)".
if (!convertBlockPointerToFunctionPointer(t))
convertToUnqualifiedObjCType(t);
ArgTypes.push_back(t);
}
}
// Now do the pointer to function cast.
QualType PtrToFuncCastType = getSimpleFunctionType(Exp->getType(), ArgTypes);
PtrToFuncCastType = Context->getPointerType(PtrToFuncCastType);
CastExpr *BlkCast = NoTypeInfoCStyleCastExpr(Context, PtrBlock,
CK_BitCast,
const_cast<Expr*>(BlockExp));
// Don't forget the parens to enforce the proper binding.
ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
BlkCast);
//PE->dump();
FieldDecl *FD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
SourceLocation(),
&Context->Idents.get("FuncPtr"),
Context->VoidPtrTy, nullptr,
/*BitWidth=*/nullptr, /*Mutable=*/true,
ICIS_NoInit);
MemberExpr *ME =
new (Context) MemberExpr(PE, true, SourceLocation(), FD, SourceLocation(),
FD->getType(), VK_LValue, OK_Ordinary);
CastExpr *FunkCast = NoTypeInfoCStyleCastExpr(Context, PtrToFuncCastType,
CK_BitCast, ME);
PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), FunkCast);
SmallVector<Expr*, 8> BlkExprs;
// Add the implicit argument.
BlkExprs.push_back(BlkCast);
// Add the user arguments.
for (CallExpr::arg_iterator I = Exp->arg_begin(),
E = Exp->arg_end(); I != E; ++I) {
BlkExprs.push_back(*I);
}
CallExpr *CE = new (Context) CallExpr(*Context, PE, BlkExprs,
Exp->getType(), VK_RValue,
SourceLocation());
return CE;
}
// We need to return the rewritten expression to handle cases where the
// BlockDeclRefExpr is embedded in another expression being rewritten.
// For example:
//
// int main() {
// __block Foo *f;
// __block int i;
//
// void (^myblock)() = ^() {
// [f test]; // f is a BlockDeclRefExpr embedded in a message (which is being rewritten).
// i = 77;
// };
//}
Stmt *RewriteObjC::RewriteBlockDeclRefExpr(DeclRefExpr *DeclRefExp) {
// Rewrite the byref variable into BYREFVAR->__forwarding->BYREFVAR
// for each DeclRefExp where BYREFVAR is name of the variable.
ValueDecl *VD = DeclRefExp->getDecl();
bool isArrow = DeclRefExp->refersToEnclosingVariableOrCapture() ||
HasLocalVariableExternalStorage(DeclRefExp->getDecl());
FieldDecl *FD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
SourceLocation(),
&Context->Idents.get("__forwarding"),
Context->VoidPtrTy, nullptr,
/*BitWidth=*/nullptr, /*Mutable=*/true,
ICIS_NoInit);
MemberExpr *ME = new (Context)
MemberExpr(DeclRefExp, isArrow, SourceLocation(), FD, SourceLocation(),
FD->getType(), VK_LValue, OK_Ordinary);
StringRef Name = VD->getName();
FD = FieldDecl::Create(*Context, nullptr, SourceLocation(), SourceLocation(),
&Context->Idents.get(Name),
Context->VoidPtrTy, nullptr,
/*BitWidth=*/nullptr, /*Mutable=*/true,
ICIS_NoInit);
ME =
new (Context) MemberExpr(ME, true, SourceLocation(), FD, SourceLocation(),
DeclRefExp->getType(), VK_LValue, OK_Ordinary);
// Need parens to enforce precedence.
ParenExpr *PE = new (Context) ParenExpr(DeclRefExp->getExprLoc(),
DeclRefExp->getExprLoc(),
ME);
ReplaceStmt(DeclRefExp, PE);
return PE;
}
// Rewrites the imported local variable V with external storage
// (static, extern, etc.) as *V
//
Stmt *RewriteObjC::RewriteLocalVariableExternalStorage(DeclRefExpr *DRE) {
ValueDecl *VD = DRE->getDecl();
if (VarDecl *Var = dyn_cast<VarDecl>(VD))
if (!ImportedLocalExternalDecls.count(Var))
return DRE;
Expr *Exp = new (Context) UnaryOperator(DRE, UO_Deref, DRE->getType(),
VK_LValue, OK_Ordinary,
DRE->getLocation());
// Need parens to enforce precedence.
ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
Exp);
ReplaceStmt(DRE, PE);
return PE;
}
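/// RewriteCastExpr - Textually rewrite a C-style cast whose type mentions
/// blocks: typeof-based casts are re-emitted with the underlying type, and
/// any block '^' in the cast's type text is replaced with '*'.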
void RewriteObjC::RewriteCastExpr(CStyleCastExpr *CE) {
SourceLocation LocStart = CE->getLParenLoc();
SourceLocation LocEnd = CE->getRParenLoc();
// Need to avoid trying to rewrite synthesized casts.
if (LocStart.isInvalid())
return;
// Need to avoid trying to rewrite casts contained in macros.
if (!Rewriter::isRewritable(LocStart) || !Rewriter::isRewritable(LocEnd))
return;
const char *startBuf = SM->getCharacterData(LocStart);
const char *endBuf = SM->getCharacterData(LocEnd);
QualType QT = CE->getType();
const Type* TypePtr = QT->getAs<Type>();
if (isa<TypeOfExprType>(TypePtr)) {
const TypeOfExprType *TypeOfExprTypePtr = cast<TypeOfExprType>(TypePtr);
QT = TypeOfExprTypePtr->getUnderlyingExpr()->getType();
std::string TypeAsString = "(";
RewriteBlockPointerType(TypeAsString, QT);
TypeAsString += ")";
ReplaceText(LocStart, endBuf-startBuf+1, TypeAsString);
return;
}
  // Scan the cast's source text, rewriting any block '^' to '*'.
const char *argPtr = startBuf;
while (*argPtr++ && (argPtr < endBuf)) {
switch (*argPtr) {
case '^':
// Replace the '^' with '*'.
LocStart = LocStart.getLocWithOffset(argPtr-startBuf);
ReplaceText(LocStart, 1, "*");
break;
}
}
return;
}
void RewriteObjC::RewriteBlockPointerFunctionArgs(FunctionDecl *FD) {
SourceLocation DeclLoc = FD->getLocation();
unsigned parenCount = 0;
// We have 1 or more arguments that have closure pointers.
const char *startBuf = SM->getCharacterData(DeclLoc);
const char *startArgList = strchr(startBuf, '(');
assert((*startArgList == '(') && "Rewriter fuzzy parser confused");
parenCount++;
// advance the location to startArgList.
DeclLoc = DeclLoc.getLocWithOffset(startArgList-startBuf);
assert((DeclLoc.isValid()) && "Invalid DeclLoc");
const char *argPtr = startArgList;
while (*argPtr++ && parenCount) {
switch (*argPtr) {
case '^':
// Replace the '^' with '*'.
DeclLoc = DeclLoc.getLocWithOffset(argPtr-startArgList);
ReplaceText(DeclLoc, 1, "*");
break;
case '(':
parenCount++;
break;
case ')':
parenCount--;
break;
}
}
return;
}
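/// PointerTypeTakesAnyBlockArguments - Returns true if the function or block
/// pointer type QT has at least one parameter that is itself a block pointer.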
bool RewriteObjC::PointerTypeTakesAnyBlockArguments(QualType QT) {
const FunctionProtoType *FTP;
const PointerType *PT = QT->getAs<PointerType>();
if (PT) {
FTP = PT->getPointeeType()->getAs<FunctionProtoType>();
} else {
const BlockPointerType *BPT = QT->getAs<BlockPointerType>();
assert(BPT && "BlockPointerTypeTakeAnyBlockArguments(): not a block pointer type");
FTP = BPT->getPointeeType()->getAs<FunctionProtoType>();
}
if (FTP) {
for (const auto &I : FTP->param_types())
if (isTopLevelBlockPointerType(I))
return true;
}
return false;
}
bool RewriteObjC::PointerTypeTakesAnyObjCQualifiedType(QualType QT) {
const FunctionProtoType *FTP;
const PointerType *PT = QT->getAs<PointerType>();
if (PT) {
FTP = PT->getPointeeType()->getAs<FunctionProtoType>();
} else {
const BlockPointerType *BPT = QT->getAs<BlockPointerType>();
assert(BPT && "BlockPointerTypeTakeAnyBlockArguments(): not a block pointer type");
FTP = BPT->getPointeeType()->getAs<FunctionProtoType>();
}
if (FTP) {
for (const auto &I : FTP->param_types()) {
if (I->isObjCQualifiedIdType())
return true;
if (I->isObjCObjectPointerType() &&
I->getPointeeType()->isObjCQualifiedInterfaceType())
return true;
}
}
return false;
}
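/// GetExtentOfArgList - Given a buffer positioned at a declarator, locate the
/// outermost parenthesized argument list, returning pointers to its opening
/// '(' and matching closing ')'.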
void RewriteObjC::GetExtentOfArgList(const char *Name, const char *&LParen,
const char *&RParen) {
const char *argPtr = strchr(Name, '(');
assert((*argPtr == '(') && "Rewriter fuzzy parser confused");
LParen = argPtr; // output the start.
argPtr++; // skip past the left paren.
unsigned parenCount = 1;
while (*argPtr && parenCount) {
switch (*argPtr) {
case '(': parenCount++; break;
case ')': parenCount--; break;
default: break;
}
if (parenCount) argPtr++;
}
assert((*argPtr == ')') && "Rewriter fuzzy parser confused");
RParen = argPtr; // output the end
}
void RewriteObjC::RewriteBlockPointerDecl(NamedDecl *ND) {
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
RewriteBlockPointerFunctionArgs(FD);
return;
}
// Handle Variables and Typedefs.
SourceLocation DeclLoc = ND->getLocation();
QualType DeclT;
if (VarDecl *VD = dyn_cast<VarDecl>(ND))
DeclT = VD->getType();
else if (TypedefNameDecl *TDD = dyn_cast<TypedefNameDecl>(ND))
DeclT = TDD->getUnderlyingType();
else if (FieldDecl *FD = dyn_cast<FieldDecl>(ND))
DeclT = FD->getType();
else
llvm_unreachable("RewriteBlockPointerDecl(): Decl type not yet handled");
const char *startBuf = SM->getCharacterData(DeclLoc);
const char *endBuf = startBuf;
// scan backward (from the decl location) for the end of the previous decl.
while (*startBuf != '^' && *startBuf != ';' && startBuf != MainFileStart)
startBuf--;
SourceLocation Start = DeclLoc.getLocWithOffset(startBuf-endBuf);
std::string buf;
unsigned OrigLength=0;
// *startBuf != '^' if we are dealing with a pointer to function that
// may take block argument types (which will be handled below).
if (*startBuf == '^') {
// Replace the '^' with '*', computing a negative offset.
buf = '*';
startBuf++;
OrigLength++;
}
while (*startBuf != ')') {
buf += *startBuf;
startBuf++;
OrigLength++;
}
buf += ')';
OrigLength++;
if (PointerTypeTakesAnyBlockArguments(DeclT) ||
PointerTypeTakesAnyObjCQualifiedType(DeclT)) {
// Replace the '^' with '*' for arguments.
// Replace id<P> with id/*<>*/
DeclLoc = ND->getLocation();
startBuf = SM->getCharacterData(DeclLoc);
const char *argListBegin, *argListEnd;
GetExtentOfArgList(startBuf, argListBegin, argListEnd);
while (argListBegin < argListEnd) {
if (*argListBegin == '^')
buf += '*';
else if (*argListBegin == '<') {
buf += "/*";
buf += *argListBegin++;
OrigLength++;
while (*argListBegin != '>') {
buf += *argListBegin++;
OrigLength++;
}
buf += *argListBegin;
buf += "*/";
}
else
buf += *argListBegin;
argListBegin++;
OrigLength++;
}
buf += ')';
OrigLength++;
}
ReplaceText(Start, OrigLength, buf);
return;
}
/// SynthesizeByrefCopyDestroyHelper - This routine synthesizes:
/// void __Block_byref_id_object_copy(struct Block_byref_id_object *dst,
/// struct Block_byref_id_object *src) {
/// _Block_object_assign (&_dest->object, _src->object,
/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT
/// [|BLOCK_FIELD_IS_WEAK]) // object
/// _Block_object_assign(&_dest->object, _src->object,
/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK
/// [|BLOCK_FIELD_IS_WEAK]) // block
/// }
/// And:
/// void __Block_byref_id_object_dispose(struct Block_byref_id_object *_src) {
/// _Block_object_dispose(_src->object,
/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT
/// [|BLOCK_FIELD_IS_WEAK]) // object
/// _Block_object_dispose(_src->object,
/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK
/// [|BLOCK_FIELD_IS_WEAK]) // block
/// }
std::string RewriteObjC::SynthesizeByrefCopyDestroyHelper(VarDecl *VD,
int flag) {
std::string S;
if (CopyDestroyCache.count(flag))
return S;
CopyDestroyCache.insert(flag);
S = "static void __Block_byref_id_object_copy_";
S += utostr(flag);
S += "(void *dst, void *src) {\n";
  // The offset of the captured object within the byref structure is:
  // void* + void* + int + int + void* + void*
unsigned IntSize =
static_cast<unsigned>(Context->getTypeSize(Context->IntTy));
unsigned VoidPtrSize =
static_cast<unsigned>(Context->getTypeSize(Context->VoidPtrTy));
unsigned offset = (VoidPtrSize*4 + IntSize + IntSize)/Context->getCharWidth();
S += " _Block_object_assign((char*)dst + ";
S += utostr(offset);
S += ", *(void * *) ((char*)src + ";
S += utostr(offset);
S += "), ";
S += utostr(flag);
S += ");\n}\n";
S += "static void __Block_byref_id_object_dispose_";
S += utostr(flag);
S += "(void *src) {\n";
S += " _Block_object_dispose(*(void * *) ((char*)src + ";
S += utostr(offset);
S += "), ";
S += utostr(flag);
S += ");\n}\n";
return S;
}
/// RewriteByRefVar - For each __block typex ND variable this routine transforms
/// the declaration into:
/// struct __Block_byref_ND {
/// void *__isa; // NULL for everything except __weak pointers
/// struct __Block_byref_ND *__forwarding;
/// int32_t __flags;
/// int32_t __size;
/// void *__Block_byref_id_object_copy; // If variable is __block ObjC object
/// void *__Block_byref_id_object_dispose; // If variable is __block ObjC object
/// typex ND;
/// };
///
/// It then replaces declaration of ND variable with:
/// struct __Block_byref_ND ND = {__isa=0B, __forwarding=&ND, __flags=some_flag,
/// __size=sizeof(struct __Block_byref_ND),
/// ND=initializer-if-any};
///
///
void RewriteObjC::RewriteByRefVar(VarDecl *ND) {
  // Insert a declaration for the function in which the block literal is
  // used.
if (CurFunctionDeclToDeclareForBlock)
RewriteBlockLiteralFunctionDecl(CurFunctionDeclToDeclareForBlock);
int flag = 0;
int isa = 0;
SourceLocation DeclLoc = ND->getTypeSpecStartLoc();
if (DeclLoc.isInvalid())
    // If the type location is missing, it is because the type itself is
    // missing (a warning has already been issued). Fall back to the
    // variable's location, which is valid in this case.
DeclLoc = ND->getLocation();
const char *startBuf = SM->getCharacterData(DeclLoc);
SourceLocation X = ND->getLocEnd();
X = SM->getExpansionLoc(X);
const char *endBuf = SM->getCharacterData(X);
std::string Name(ND->getNameAsString());
std::string ByrefType;
RewriteByRefString(ByrefType, Name, ND, true);
ByrefType += " {\n";
ByrefType += " void *__isa;\n";
RewriteByRefString(ByrefType, Name, ND);
ByrefType += " *__forwarding;\n";
ByrefType += " int __flags;\n";
ByrefType += " int __size;\n";
// Add void *__Block_byref_id_object_copy;
// void *__Block_byref_id_object_dispose; if needed.
QualType Ty = ND->getType();
bool HasCopyAndDispose = Context->BlockRequiresCopying(Ty, ND);
if (HasCopyAndDispose) {
ByrefType += " void (*__Block_byref_id_object_copy)(void*, void*);\n";
ByrefType += " void (*__Block_byref_id_object_dispose)(void*);\n";
}
QualType T = Ty;
(void)convertBlockPointerToFunctionPointer(T);
T.getAsStringInternal(Name, Context->getPrintingPolicy());
ByrefType += " " + Name + ";\n";
ByrefType += "};\n";
  // Insert this type in global scope. It is needed by the helper functions.
SourceLocation FunLocStart;
if (CurFunctionDef)
FunLocStart = CurFunctionDef->getTypeSpecStartLoc();
else {
assert(CurMethodDef && "RewriteByRefVar - CurMethodDef is null");
FunLocStart = CurMethodDef->getLocStart();
}
InsertText(FunLocStart, ByrefType);
if (Ty.isObjCGCWeak()) {
flag |= BLOCK_FIELD_IS_WEAK;
isa = 1;
}
if (HasCopyAndDispose) {
flag = BLOCK_BYREF_CALLER;
QualType Ty = ND->getType();
// FIXME. Handle __weak variable (BLOCK_FIELD_IS_WEAK) as well.
if (Ty->isBlockPointerType())
flag |= BLOCK_FIELD_IS_BLOCK;
else
flag |= BLOCK_FIELD_IS_OBJECT;
std::string HF = SynthesizeByrefCopyDestroyHelper(ND, flag);
if (!HF.empty())
InsertText(FunLocStart, HF);
}
// struct __Block_byref_ND ND =
// {0, &ND, some_flag, __size=sizeof(struct __Block_byref_ND),
// initializer-if-any};
bool hasInit = (ND->getInit() != nullptr);
unsigned flags = 0;
if (HasCopyAndDispose)
flags |= BLOCK_HAS_COPY_DISPOSE;
Name = ND->getNameAsString();
ByrefType.clear();
RewriteByRefString(ByrefType, Name, ND);
std::string ForwardingCastType("(");
ForwardingCastType += ByrefType + " *)";
if (!hasInit) {
ByrefType += " " + Name + " = {(void*)";
ByrefType += utostr(isa);
ByrefType += "," + ForwardingCastType + "&" + Name + ", ";
ByrefType += utostr(flags);
ByrefType += ", ";
ByrefType += "sizeof(";
RewriteByRefString(ByrefType, Name, ND);
ByrefType += ")";
if (HasCopyAndDispose) {
ByrefType += ", __Block_byref_id_object_copy_";
ByrefType += utostr(flag);
ByrefType += ", __Block_byref_id_object_dispose_";
ByrefType += utostr(flag);
}
ByrefType += "};\n";
unsigned nameSize = Name.size();
    // For a block or function pointer declaration, the name is already
    // part of the declaration.
if (Ty->isBlockPointerType() || Ty->isFunctionPointerType())
nameSize = 1;
ReplaceText(DeclLoc, endBuf-startBuf+nameSize, ByrefType);
}
else {
SourceLocation startLoc;
Expr *E = ND->getInit();
if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E))
startLoc = ECE->getLParenLoc();
else
startLoc = E->getLocStart();
startLoc = SM->getExpansionLoc(startLoc);
endBuf = SM->getCharacterData(startLoc);
ByrefType += " " + Name;
ByrefType += " = {(void*)";
ByrefType += utostr(isa);
ByrefType += "," + ForwardingCastType + "&" + Name + ", ";
ByrefType += utostr(flags);
ByrefType += ", ";
ByrefType += "sizeof(";
RewriteByRefString(ByrefType, Name, ND);
ByrefType += "), ";
if (HasCopyAndDispose) {
ByrefType += "__Block_byref_id_object_copy_";
ByrefType += utostr(flag);
ByrefType += ", __Block_byref_id_object_dispose_";
ByrefType += utostr(flag);
ByrefType += ", ";
}
ReplaceText(DeclLoc, endBuf-startBuf, ByrefType);
// Complete the newly synthesized compound expression by inserting a right
// curly brace before the end of the declaration.
// FIXME: This approach avoids rewriting the initializer expression. It
// also assumes there is only one declarator. For example, the following
// isn't currently supported by this routine (in general):
//
// double __block BYREFVAR = 1.34, BYREFVAR2 = 1.37;
//
const char *startInitializerBuf = SM->getCharacterData(startLoc);
const char *semiBuf = strchr(startInitializerBuf, ';');
assert((*semiBuf == ';') && "RewriteByRefVar: can't find ';'");
SourceLocation semiLoc =
startLoc.getLocWithOffset(semiBuf-startInitializerBuf);
InsertText(semiLoc, "}");
}
return;
}
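/// CollectBlockDeclRefInfo - Walk the block's body and partition every
/// captured variable into the "by copy" and "by ref" lists (variables marked
/// __block are by ref), recording any imported objects that will need
/// copy/dispose helpers.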
void RewriteObjC::CollectBlockDeclRefInfo(BlockExpr *Exp) {
// Add initializers for any closure decl refs.
GetBlockDeclRefExprs(Exp->getBody());
if (BlockDeclRefs.size()) {
// Unique all "by copy" declarations.
for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
if (!BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>()) {
if (!BlockByCopyDeclsPtrSet.count(BlockDeclRefs[i]->getDecl())) {
BlockByCopyDeclsPtrSet.insert(BlockDeclRefs[i]->getDecl());
BlockByCopyDecls.push_back(BlockDeclRefs[i]->getDecl());
}
}
// Unique all "by ref" declarations.
for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
if (BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>()) {
if (!BlockByRefDeclsPtrSet.count(BlockDeclRefs[i]->getDecl())) {
BlockByRefDeclsPtrSet.insert(BlockDeclRefs[i]->getDecl());
BlockByRefDecls.push_back(BlockDeclRefs[i]->getDecl());
}
}
// Find any imported blocks...they will need special attention.
for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
if (BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>() ||
BlockDeclRefs[i]->getType()->isObjCObjectPointerType() ||
BlockDeclRefs[i]->getType()->isBlockPointerType())
ImportedBlockDecls.insert(BlockDeclRefs[i]->getDecl());
}
}
FunctionDecl *RewriteObjC::SynthBlockInitFunctionDecl(StringRef name) {
IdentifierInfo *ID = &Context->Idents.get(name);
QualType FType = Context->getFunctionNoProtoType(Context->VoidPtrTy);
return FunctionDecl::Create(*Context, TUDecl, SourceLocation(),
SourceLocation(), ID, FType, nullptr, SC_Extern,
false, false);
}
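/// SynthBlockInitExpr - Build the AST that replaces a block literal: a call
/// to the synthesized impl struct's constructor, whose address is then cast
/// to the block's (converted) function pointer type. Roughly:
///
///   (FType)&__func_block_impl_N((void *)__func_block_func_N,
///                               &__func_block_desc_N_DATA, <captures>
///                               <, flags if helpers are needed>)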
Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
const SmallVectorImpl<DeclRefExpr *> &InnerBlockDeclRefs) {
const BlockDecl *block = Exp->getBlockDecl();
Blocks.push_back(Exp);
CollectBlockDeclRefInfo(Exp);
// Add inner imported variables now used in current block.
int countOfInnerDecls = 0;
if (!InnerBlockDeclRefs.empty()) {
for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++) {
DeclRefExpr *Exp = InnerBlockDeclRefs[i];
ValueDecl *VD = Exp->getDecl();
if (!VD->hasAttr<BlocksAttr>() && !BlockByCopyDeclsPtrSet.count(VD)) {
        // We need to save the variables copied into nested blocks because
        // they are needed later when the enclosing block's metadata is
        // generated. See the SynthesizeBlockLiterals routine.
InnerDeclRefs.push_back(Exp); countOfInnerDecls++;
BlockDeclRefs.push_back(Exp);
BlockByCopyDeclsPtrSet.insert(VD);
BlockByCopyDecls.push_back(VD);
}
if (VD->hasAttr<BlocksAttr>() && !BlockByRefDeclsPtrSet.count(VD)) {
InnerDeclRefs.push_back(Exp); countOfInnerDecls++;
BlockDeclRefs.push_back(Exp);
BlockByRefDeclsPtrSet.insert(VD);
BlockByRefDecls.push_back(VD);
}
}
// Find any imported blocks...they will need special attention.
for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++)
if (InnerBlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>() ||
InnerBlockDeclRefs[i]->getType()->isObjCObjectPointerType() ||
InnerBlockDeclRefs[i]->getType()->isBlockPointerType())
ImportedBlockDecls.insert(InnerBlockDeclRefs[i]->getDecl());
}
InnerDeclRefsCount.push_back(countOfInnerDecls);
std::string FuncName;
if (CurFunctionDef)
FuncName = CurFunctionDef->getNameAsString();
else if (CurMethodDef)
BuildUniqueMethodName(FuncName, CurMethodDef);
else if (GlobalVarDecl)
FuncName = std::string(GlobalVarDecl->getNameAsString());
std::string BlockNumber = utostr(Blocks.size()-1);
std::string Tag = "__" + FuncName + "_block_impl_" + BlockNumber;
std::string Func = "__" + FuncName + "_block_func_" + BlockNumber;
// Get a pointer to the function type so we can cast appropriately.
QualType BFT = convertFunctionTypeOfBlocks(Exp->getFunctionType());
QualType FType = Context->getPointerType(BFT);
FunctionDecl *FD;
Expr *NewRep;
// Simulate a constructor call...
FD = SynthBlockInitFunctionDecl(Tag);
DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, FType, VK_RValue,
SourceLocation());
SmallVector<Expr*, 4> InitExprs;
// Initialize the block function.
FD = SynthBlockInitFunctionDecl(Func);
DeclRefExpr *Arg = new (Context) DeclRefExpr(FD, false, FD->getType(),
VK_LValue, SourceLocation());
CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy,
CK_BitCast, Arg);
InitExprs.push_back(castExpr);
// Initialize the block descriptor.
std::string DescData = "__" + FuncName + "_block_desc_" + BlockNumber + "_DATA";
VarDecl *NewVD = VarDecl::Create(*Context, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get(DescData.c_str()),
Context->VoidPtrTy, nullptr,
SC_Static);
UnaryOperator *DescRefExpr =
new (Context) UnaryOperator(new (Context) DeclRefExpr(NewVD, false,
Context->VoidPtrTy,
VK_LValue,
SourceLocation()),
UO_AddrOf,
Context->getPointerType(Context->VoidPtrTy),
VK_RValue, OK_Ordinary,
SourceLocation());
InitExprs.push_back(DescRefExpr);
// Add initializers for any closure decl refs.
if (BlockDeclRefs.size()) {
Expr *Exp;
// Output all "by copy" declarations.
for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByCopyDecls.begin(),
E = BlockByCopyDecls.end(); I != E; ++I) {
if (isObjCType((*I)->getType())) {
// FIXME: Conform to ABI ([[obj retain] autorelease]).
FD = SynthBlockInitFunctionDecl((*I)->getName());
Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue,
SourceLocation());
if (HasLocalVariableExternalStorage(*I)) {
QualType QT = (*I)->getType();
QT = Context->getPointerType(QT);
Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
OK_Ordinary, SourceLocation());
}
} else if (isTopLevelBlockPointerType((*I)->getType())) {
FD = SynthBlockInitFunctionDecl((*I)->getName());
Arg = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue,
SourceLocation());
Exp = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy,
CK_BitCast, Arg);
} else {
FD = SynthBlockInitFunctionDecl((*I)->getName());
Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue,
SourceLocation());
if (HasLocalVariableExternalStorage(*I)) {
QualType QT = (*I)->getType();
QT = Context->getPointerType(QT);
Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
OK_Ordinary, SourceLocation());
}
}
InitExprs.push_back(Exp);
}
// Output all "by ref" declarations.
for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByRefDecls.begin(),
E = BlockByRefDecls.end(); I != E; ++I) {
ValueDecl *ND = (*I);
std::string Name(ND->getNameAsString());
std::string RecName;
RewriteByRefString(RecName, Name, ND, true);
IdentifierInfo *II = &Context->Idents.get(RecName.c_str()
+ sizeof("struct"));
RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
SourceLocation(), SourceLocation(),
II);
assert(RD && "SynthBlockInitExpr(): Can't find RecordDecl");
QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
FD = SynthBlockInitFunctionDecl((*I)->getName());
Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue,
SourceLocation());
bool isNestedCapturedVar = false;
if (block)
for (const auto &CI : block->captures()) {
const VarDecl *variable = CI.getVariable();
if (variable == ND && CI.isNested()) {
assert (CI.isByRef() &&
"SynthBlockInitExpr - captured block variable is not byref");
isNestedCapturedVar = true;
break;
}
}
      // A captured nested byref variable already has its address passed;
      // do not take its address again.
if (!isNestedCapturedVar)
Exp = new (Context) UnaryOperator(Exp, UO_AddrOf,
Context->getPointerType(Exp->getType()),
VK_RValue, OK_Ordinary, SourceLocation());
Exp = NoTypeInfoCStyleCastExpr(Context, castT, CK_BitCast, Exp);
InitExprs.push_back(Exp);
}
}
if (ImportedBlockDecls.size()) {
    // Generate BLOCK_HAS_COPY_DISPOSE (has helper functions) | BLOCK_HAS_DESCRIPTOR.
int flag = (BLOCK_HAS_COPY_DISPOSE | BLOCK_HAS_DESCRIPTOR);
unsigned IntSize =
static_cast<unsigned>(Context->getTypeSize(Context->IntTy));
Expr *FlagExp = IntegerLiteral::Create(*Context, llvm::APInt(IntSize, flag),
Context->IntTy, SourceLocation());
InitExprs.push_back(FlagExp);
}
NewRep = new (Context) CallExpr(*Context, DRE, InitExprs,
FType, VK_LValue, SourceLocation());
NewRep = new (Context) UnaryOperator(NewRep, UO_AddrOf,
Context->getPointerType(NewRep->getType()),
VK_RValue, OK_Ordinary, SourceLocation());
NewRep = NoTypeInfoCStyleCastExpr(Context, FType, CK_BitCast,
NewRep);
BlockDeclRefs.clear();
BlockByRefDecls.clear();
BlockByRefDeclsPtrSet.clear();
BlockByCopyDecls.clear();
BlockByCopyDeclsPtrSet.clear();
ImportedBlockDecls.clear();
return NewRep;
}
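/// IsDeclStmtInForeachHeader - Returns true if DS is the element declaration
/// of the enclosing ObjCForCollectionStmt, e.g. the "id x" in
/// "for (id x in collection)", which must not be rewritten textually here.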
bool RewriteObjC::IsDeclStmtInForeachHeader(DeclStmt *DS) {
if (const ObjCForCollectionStmt * CS =
dyn_cast<ObjCForCollectionStmt>(Stmts.back()))
return CS->getElement() == DS;
return false;
}
//===----------------------------------------------------------------------===//
// Function Body / Expression rewriting
//===----------------------------------------------------------------------===//
Stmt *RewriteObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
isa<DoStmt>(S) || isa<ForStmt>(S))
Stmts.push_back(S);
else if (isa<ObjCForCollectionStmt>(S)) {
Stmts.push_back(S);
ObjCBcLabelNo.push_back(++BcLabelCount);
}
// Pseudo-object operations and ivar references need special
// treatment because we're going to recursively rewrite them.
if (PseudoObjectExpr *PseudoOp = dyn_cast<PseudoObjectExpr>(S)) {
if (isa<BinaryOperator>(PseudoOp->getSyntacticForm())) {
return RewritePropertyOrImplicitSetter(PseudoOp);
} else {
return RewritePropertyOrImplicitGetter(PseudoOp);
}
} else if (ObjCIvarRefExpr *IvarRefExpr = dyn_cast<ObjCIvarRefExpr>(S)) {
return RewriteObjCIvarRefExpr(IvarRefExpr);
}
SourceRange OrigStmtRange = S->getSourceRange();
// Perform a bottom up rewrite of all children.
for (Stmt *&childStmt : S->children())
if (childStmt) {
Stmt *newStmt = RewriteFunctionBodyOrGlobalInitializer(childStmt);
if (newStmt) {
childStmt = newStmt;
}
}
if (BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
SmallVector<DeclRefExpr *, 8> InnerBlockDeclRefs;
llvm::SmallPtrSet<const DeclContext *, 8> InnerContexts;
InnerContexts.insert(BE->getBlockDecl());
ImportedLocalExternalDecls.clear();
GetInnerBlockDeclRefExprs(BE->getBody(),
InnerBlockDeclRefs, InnerContexts);
// Rewrite the block body in place.
Stmt *SaveCurrentBody = CurrentBody;
CurrentBody = BE->getBody();
PropParentMap = nullptr;
    // A block literal on the RHS of a property dot-syntax assignment
    // must be replaced by its synthesized AST so that getRewrittenText
    // works as expected. In this case, what actually ends up on the RHS
    // is blockTranscribed, the helper function for the block literal;
    // as in: self.c = ^() {[ace ARR];};
bool saveDisableReplaceStmt = DisableReplaceStmt;
DisableReplaceStmt = false;
RewriteFunctionBodyOrGlobalInitializer(BE->getBody());
DisableReplaceStmt = saveDisableReplaceStmt;
CurrentBody = SaveCurrentBody;
PropParentMap = nullptr;
ImportedLocalExternalDecls.clear();
// Now we snarf the rewritten text and stash it away for later use.
std::string Str = Rewrite.getRewrittenText(BE->getSourceRange());
RewrittenBlockExprs[BE] = Str;
Stmt *blockTranscribed = SynthBlockInitExpr(BE, InnerBlockDeclRefs);
//blockTranscribed->dump();
ReplaceStmt(S, blockTranscribed);
return blockTranscribed;
}
// Handle specific things.
if (ObjCEncodeExpr *AtEncode = dyn_cast<ObjCEncodeExpr>(S))
return RewriteAtEncode(AtEncode);
if (ObjCSelectorExpr *AtSelector = dyn_cast<ObjCSelectorExpr>(S))
return RewriteAtSelector(AtSelector);
if (ObjCStringLiteral *AtString = dyn_cast<ObjCStringLiteral>(S))
return RewriteObjCStringLiteral(AtString);
if (ObjCMessageExpr *MessExpr = dyn_cast<ObjCMessageExpr>(S)) {
#if 0
// Before we rewrite it, put the original message expression in a comment.
SourceLocation startLoc = MessExpr->getLocStart();
SourceLocation endLoc = MessExpr->getLocEnd();
const char *startBuf = SM->getCharacterData(startLoc);
const char *endBuf = SM->getCharacterData(endLoc);
std::string messString;
messString += "// ";
messString.append(startBuf, endBuf-startBuf+1);
messString += "\n";
// FIXME: Missing definition of
// InsertText(clang::SourceLocation, char const*, unsigned int).
// InsertText(startLoc, messString.c_str(), messString.size());
// Tried this, but it didn't work either...
// ReplaceText(startLoc, 0, messString.c_str(), messString.size());
#endif
return RewriteMessageExpr(MessExpr);
}
if (ObjCAtTryStmt *StmtTry = dyn_cast<ObjCAtTryStmt>(S))
return RewriteObjCTryStmt(StmtTry);
if (ObjCAtSynchronizedStmt *StmtTry = dyn_cast<ObjCAtSynchronizedStmt>(S))
return RewriteObjCSynchronizedStmt(StmtTry);
if (ObjCAtThrowStmt *StmtThrow = dyn_cast<ObjCAtThrowStmt>(S))
return RewriteObjCThrowStmt(StmtThrow);
if (ObjCProtocolExpr *ProtocolExp = dyn_cast<ObjCProtocolExpr>(S))
return RewriteObjCProtocolExpr(ProtocolExp);
if (ObjCForCollectionStmt *StmtForCollection =
dyn_cast<ObjCForCollectionStmt>(S))
return RewriteObjCForCollectionStmt(StmtForCollection,
OrigStmtRange.getEnd());
if (BreakStmt *StmtBreakStmt =
dyn_cast<BreakStmt>(S))
return RewriteBreakStmt(StmtBreakStmt);
if (ContinueStmt *StmtContinueStmt =
dyn_cast<ContinueStmt>(S))
return RewriteContinueStmt(StmtContinueStmt);
// Need to check for protocol refs (id <P>, Foo <P> *) in variable decls
// and cast exprs.
if (DeclStmt *DS = dyn_cast<DeclStmt>(S)) {
// FIXME: What we're doing here is modifying the type-specifier that
// precedes the first Decl. In the future the DeclGroup should have
// a separate type-specifier that we can rewrite.
// NOTE: We need to avoid rewriting the DeclStmt if it is within
// the context of an ObjCForCollectionStmt. For example:
// NSArray *someArray;
// for (id <FooProtocol> index in someArray) ;
// This is because RewriteObjCForCollectionStmt() does textual rewriting
// and it depends on the original text locations/positions.
if (Stmts.empty() || !IsDeclStmtInForeachHeader(DS))
RewriteObjCQualifiedInterfaceTypes(*DS->decl_begin());
// Blocks rewrite rules.
for (auto *SD : DS->decls()) {
if (ValueDecl *ND = dyn_cast<ValueDecl>(SD)) {
if (isTopLevelBlockPointerType(ND->getType()))
RewriteBlockPointerDecl(ND);
else if (ND->getType()->isFunctionPointerType())
CheckFunctionPointerDecl(ND->getType(), ND);
if (VarDecl *VD = dyn_cast<VarDecl>(SD)) {
if (VD->hasAttr<BlocksAttr>()) {
static unsigned uniqueByrefDeclCount = 0;
assert(!BlockByRefDeclNo.count(ND) &&
"RewriteFunctionBodyOrGlobalInitializer: Duplicate byref decl");
BlockByRefDeclNo[ND] = uniqueByrefDeclCount++;
RewriteByRefVar(VD);
}
else
RewriteTypeOfDecl(VD);
}
}
if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(SD)) {
if (isTopLevelBlockPointerType(TD->getUnderlyingType()))
RewriteBlockPointerDecl(TD);
else if (TD->getUnderlyingType()->isFunctionPointerType())
CheckFunctionPointerDecl(TD->getUnderlyingType(), TD);
}
}
}
if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(S))
RewriteObjCQualifiedInterfaceTypes(CE);
if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
isa<DoStmt>(S) || isa<ForStmt>(S)) {
assert(!Stmts.empty() && "Statement stack is empty");
assert ((isa<SwitchStmt>(Stmts.back()) || isa<WhileStmt>(Stmts.back()) ||
isa<DoStmt>(Stmts.back()) || isa<ForStmt>(Stmts.back()))
&& "Statement stack mismatch");
Stmts.pop_back();
}
// Handle blocks rewriting.
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S)) {
ValueDecl *VD = DRE->getDecl();
if (VD->hasAttr<BlocksAttr>())
return RewriteBlockDeclRefExpr(DRE);
if (HasLocalVariableExternalStorage(VD))
return RewriteLocalVariableExternalStorage(DRE);
}
if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
if (CE->getCallee()->getType()->isBlockPointerType()) {
Stmt *BlockCall = SynthesizeBlockCall(CE, CE->getCallee());
ReplaceStmt(S, BlockCall);
return BlockCall;
}
}
if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(S)) {
RewriteCastExpr(CE);
}
#if 0
if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(S)) {
CastExpr *Replacement = new (Context) CastExpr(ICE->getType(),
ICE->getSubExpr(),
SourceLocation());
// Get the new text.
std::string SStr;
llvm::raw_string_ostream Buf(SStr);
Replacement->printPretty(Buf);
const std::string &Str = Buf.str();
printf("CAST = %s\n", &Str[0]);
InsertText(ICE->getSubExpr()->getLocStart(), &Str[0], Str.size());
delete S;
return Replacement;
}
#endif
// Return this stmt unmodified.
return S;
}
void RewriteObjC::RewriteRecordBody(RecordDecl *RD) {
for (auto *FD : RD->fields()) {
if (isTopLevelBlockPointerType(FD->getType()))
RewriteBlockPointerDecl(FD);
if (FD->getType()->isObjCQualifiedIdType() ||
FD->getType()->isObjCQualifiedInterfaceType())
RewriteObjCQualifiedInterfaceTypes(FD);
}
}
/// HandleDeclInMainFile - This is called for each top-level decl defined in the
/// main file of the input.
void RewriteObjC::HandleDeclInMainFile(Decl *D) {
switch (D->getKind()) {
case Decl::Function: {
FunctionDecl *FD = cast<FunctionDecl>(D);
if (FD->isOverloadedOperator())
return;
// Since function prototypes don't have ParmDecl's, we check the function
// prototype. This enables us to rewrite function declarations and
// definitions using the same code.
RewriteBlocksInFunctionProtoType(FD->getType(), FD);
if (!FD->isThisDeclarationADefinition())
break;
// FIXME: If this should support Obj-C++, support CXXTryStmt
if (CompoundStmt *Body = dyn_cast_or_null<CompoundStmt>(FD->getBody())) {
CurFunctionDef = FD;
CurFunctionDeclToDeclareForBlock = FD;
CurrentBody = Body;
Body =
cast_or_null<CompoundStmt>(RewriteFunctionBodyOrGlobalInitializer(Body));
FD->setBody(Body);
CurrentBody = nullptr;
if (PropParentMap) {
delete PropParentMap;
PropParentMap = nullptr;
}
// This synthesizes and inserts the block "impl" struct, invoke function,
// and any copy/dispose helper functions.
InsertBlockLiteralsWithinFunction(FD);
CurFunctionDef = nullptr;
CurFunctionDeclToDeclareForBlock = nullptr;
}
break;
}
case Decl::ObjCMethod: {
ObjCMethodDecl *MD = cast<ObjCMethodDecl>(D);
if (CompoundStmt *Body = MD->getCompoundBody()) {
CurMethodDef = MD;
CurrentBody = Body;
Body =
cast_or_null<CompoundStmt>(RewriteFunctionBodyOrGlobalInitializer(Body));
MD->setBody(Body);
CurrentBody = nullptr;
if (PropParentMap) {
delete PropParentMap;
PropParentMap = nullptr;
}
InsertBlockLiteralsWithinMethod(MD);
CurMethodDef = nullptr;
}
break;
}
case Decl::ObjCImplementation: {
ObjCImplementationDecl *CI = cast<ObjCImplementationDecl>(D);
ClassImplementation.push_back(CI);
break;
}
case Decl::ObjCCategoryImpl: {
ObjCCategoryImplDecl *CI = cast<ObjCCategoryImplDecl>(D);
CategoryImplementation.push_back(CI);
break;
}
case Decl::Var: {
VarDecl *VD = cast<VarDecl>(D);
RewriteObjCQualifiedInterfaceTypes(VD);
if (isTopLevelBlockPointerType(VD->getType()))
RewriteBlockPointerDecl(VD);
else if (VD->getType()->isFunctionPointerType()) {
CheckFunctionPointerDecl(VD->getType(), VD);
if (VD->getInit()) {
if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(VD->getInit())) {
RewriteCastExpr(CE);
}
}
} else if (VD->getType()->isRecordType()) {
RecordDecl *RD = VD->getType()->getAs<RecordType>()->getDecl();
if (RD->isCompleteDefinition())
RewriteRecordBody(RD);
}
if (VD->getInit()) {
GlobalVarDecl = VD;
CurrentBody = VD->getInit();
RewriteFunctionBodyOrGlobalInitializer(VD->getInit());
CurrentBody = nullptr;
if (PropParentMap) {
delete PropParentMap;
PropParentMap = nullptr;
}
SynthesizeBlockLiterals(VD->getTypeSpecStartLoc(), VD->getName());
GlobalVarDecl = nullptr;
// This is needed for blocks.
if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(VD->getInit())) {
RewriteCastExpr(CE);
}
}
break;
}
case Decl::TypeAlias:
case Decl::Typedef: {
if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
if (isTopLevelBlockPointerType(TD->getUnderlyingType()))
RewriteBlockPointerDecl(TD);
else if (TD->getUnderlyingType()->isFunctionPointerType())
CheckFunctionPointerDecl(TD->getUnderlyingType(), TD);
}
break;
}
case Decl::CXXRecord:
case Decl::Record: {
RecordDecl *RD = cast<RecordDecl>(D);
if (RD->isCompleteDefinition())
RewriteRecordBody(RD);
break;
}
default:
break;
}
// Nothing yet.
}
void RewriteObjC::HandleTranslationUnit(ASTContext &C) {
if (Diags.hasErrorOccurred())
return;
RewriteInclude();
// Here's a great place to add any extra declarations that may be needed.
// Write out meta data for each @protocol(<expr>).
for (ObjCProtocolDecl *ProtDecl : ProtocolExprDecls)
RewriteObjCProtocolMetaData(ProtDecl, "", "", Preamble);
InsertText(SM->getLocForStartOfFile(MainFileID), Preamble, false);
if (ClassImplementation.size() || CategoryImplementation.size())
RewriteImplementations();
// Get the buffer corresponding to MainFileID. If we haven't changed it, then
// we are done.
if (const RewriteBuffer *RewriteBuf =
Rewrite.getRewriteBufferFor(MainFileID)) {
//printf("Changed:\n");
*OutFile << std::string(RewriteBuf->begin(), RewriteBuf->end());
} else {
llvm::errs() << "No changes\n";
}
if (ClassImplementation.size() || CategoryImplementation.size() ||
ProtocolExprDecls.size()) {
    // Rewrite Objective-C metadata.
std::string ResultStr;
RewriteMetaDataIntoBuffer(ResultStr);
// Emit metadata.
*OutFile << ResultStr;
}
OutFile->flush();
}
void RewriteObjCFragileABI::Initialize(ASTContext &context) {
InitializeCommon(context);
  // Declaring objc_selector outside the parameter list removes a silly
  // scope-related warning...
if (IsHeader)
Preamble = "#pragma once\n";
Preamble += "struct objc_selector; struct objc_class;\n";
Preamble += "struct __rw_objc_super { struct objc_object *object; ";
Preamble += "struct objc_object *superClass; ";
if (LangOpts.MicrosoftExt) {
// Add a constructor for creating temporary objects.
Preamble += "__rw_objc_super(struct objc_object *o, struct objc_object *s) "
": ";
Preamble += "object(o), superClass(s) {} ";
}
Preamble += "};\n";
Preamble += "#ifndef _REWRITER_typedef_Protocol\n";
Preamble += "typedef struct objc_object Protocol;\n";
Preamble += "#define _REWRITER_typedef_Protocol\n";
Preamble += "#endif\n";
if (LangOpts.MicrosoftExt) {
Preamble += "#define __OBJC_RW_DLLIMPORT extern \"C\" __declspec(dllimport)\n";
Preamble += "#define __OBJC_RW_STATICIMPORT extern \"C\"\n";
} else
Preamble += "#define __OBJC_RW_DLLIMPORT extern\n";
Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_msgSend";
Preamble += "(struct objc_object *, struct objc_selector *, ...);\n";
Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_msgSendSuper";
Preamble += "(struct objc_super *, struct objc_selector *, ...);\n";
Preamble += "__OBJC_RW_DLLIMPORT struct objc_object* objc_msgSend_stret";
Preamble += "(struct objc_object *, struct objc_selector *, ...);\n";
Preamble += "__OBJC_RW_DLLIMPORT struct objc_object* objc_msgSendSuper_stret";
Preamble += "(struct objc_super *, struct objc_selector *, ...);\n";
Preamble += "__OBJC_RW_DLLIMPORT double objc_msgSend_fpret";
Preamble += "(struct objc_object *, struct objc_selector *, ...);\n";
Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_getClass";
Preamble += "(const char *);\n";
Preamble += "__OBJC_RW_DLLIMPORT struct objc_class *class_getSuperclass";
Preamble += "(struct objc_class *);\n";
Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_getMetaClass";
Preamble += "(const char *);\n";
Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_throw(struct objc_object *);\n";
Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_try_enter(void *);\n";
Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_try_exit(void *);\n";
Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_exception_extract(void *);\n";
Preamble += "__OBJC_RW_DLLIMPORT int objc_exception_match";
Preamble += "(struct objc_class *, struct objc_object *);\n";
// @synchronized hooks.
Preamble += "__OBJC_RW_DLLIMPORT int objc_sync_enter(struct objc_object *);\n";
Preamble += "__OBJC_RW_DLLIMPORT int objc_sync_exit(struct objc_object *);\n";
Preamble += "__OBJC_RW_DLLIMPORT Protocol *objc_getProtocol(const char *);\n";
Preamble += "#ifndef __FASTENUMERATIONSTATE\n";
Preamble += "struct __objcFastEnumerationState {\n\t";
Preamble += "unsigned long state;\n\t";
Preamble += "void **itemsPtr;\n\t";
Preamble += "unsigned long *mutationsPtr;\n\t";
Preamble += "unsigned long extra[5];\n};\n";
Preamble += "__OBJC_RW_DLLIMPORT void objc_enumerationMutation(struct objc_object *);\n";
Preamble += "#define __FASTENUMERATIONSTATE\n";
Preamble += "#endif\n";
Preamble += "#ifndef __NSCONSTANTSTRINGIMPL\n";
Preamble += "struct __NSConstantStringImpl {\n";
Preamble += " int *isa;\n";
Preamble += " int flags;\n";
Preamble += " char *str;\n";
Preamble += " long length;\n";
Preamble += "};\n";
Preamble += "#ifdef CF_EXPORT_CONSTANT_STRING\n";
Preamble += "extern \"C\" __declspec(dllexport) int __CFConstantStringClassReference[];\n";
Preamble += "#else\n";
Preamble += "__OBJC_RW_DLLIMPORT int __CFConstantStringClassReference[];\n";
Preamble += "#endif\n";
Preamble += "#define __NSCONSTANTSTRINGIMPL\n";
Preamble += "#endif\n";
// Blocks preamble.
Preamble += "#ifndef BLOCK_IMPL\n";
Preamble += "#define BLOCK_IMPL\n";
Preamble += "struct __block_impl {\n";
Preamble += " void *isa;\n";
Preamble += " int Flags;\n";
Preamble += " int Reserved;\n";
Preamble += " void *FuncPtr;\n";
Preamble += "};\n";
Preamble += "// Runtime copy/destroy helper functions (from Block_private.h)\n";
Preamble += "#ifdef __OBJC_EXPORT_BLOCKS\n";
Preamble += "extern \"C\" __declspec(dllexport) "
"void _Block_object_assign(void *, const void *, const int);\n";
Preamble += "extern \"C\" __declspec(dllexport) void _Block_object_dispose(const void *, const int);\n";
Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteGlobalBlock[32];\n";
Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteStackBlock[32];\n";
Preamble += "#else\n";
Preamble += "__OBJC_RW_DLLIMPORT void _Block_object_assign(void *, const void *, const int);\n";
Preamble += "__OBJC_RW_DLLIMPORT void _Block_object_dispose(const void *, const int);\n";
Preamble += "__OBJC_RW_DLLIMPORT void *_NSConcreteGlobalBlock[32];\n";
Preamble += "__OBJC_RW_DLLIMPORT void *_NSConcreteStackBlock[32];\n";
Preamble += "#endif\n";
Preamble += "#endif\n";
if (LangOpts.MicrosoftExt) {
Preamble += "#undef __OBJC_RW_DLLIMPORT\n";
Preamble += "#undef __OBJC_RW_STATICIMPORT\n";
Preamble += "#ifndef KEEP_ATTRIBUTES\n"; // We use this for clang tests.
Preamble += "#define __attribute__(X)\n";
Preamble += "#endif\n";
Preamble += "#define __weak\n";
}
else {
Preamble += "#define __block\n";
Preamble += "#define __weak\n";
}
  // NOTE: Windows uses LLP64 in 64-bit mode, so cast the pointer to long long;
  // this avoids warnings in both 64-bit and 32-bit compilation models.
Preamble += "\n#define __OFFSETOFIVAR__(TYPE, MEMBER) ((long long) &((TYPE *)0)->MEMBER)\n";
}
/// RewriteIvarOffsetComputation - This routine synthesizes the computation of
/// an ivar's offset.
void RewriteObjCFragileABI::RewriteIvarOffsetComputation(ObjCIvarDecl *ivar,
std::string &Result) {
if (ivar->isBitField()) {
// FIXME: The hack below doesn't work for bitfields. For now, we simply
// place all bitfields at offset 0.
Result += "0";
} else {
Result += "__OFFSETOFIVAR__(struct ";
Result += ivar->getContainingInterface()->getNameAsString();
if (LangOpts.MicrosoftExt)
Result += "_IMPL";
Result += ", ";
Result += ivar->getNameAsString();
Result += ")";
}
}
/// RewriteObjCProtocolMetaData - Rewrite a protocol's meta-data.
void RewriteObjCFragileABI::RewriteObjCProtocolMetaData(
ObjCProtocolDecl *PDecl, StringRef prefix,
StringRef ClassName, std::string &Result) {
static bool objc_protocol_methods = false;
// Output struct protocol_methods holder of method selector and type.
if (!objc_protocol_methods && PDecl->hasDefinition()) {
/* struct protocol_methods {
SEL _cmd;
char *method_types;
}
*/
Result += "\nstruct _protocol_methods {\n";
Result += "\tstruct objc_selector *_cmd;\n";
Result += "\tchar *method_types;\n";
Result += "};\n";
objc_protocol_methods = true;
}
// Do not synthesize the protocol more than once.
if (ObjCSynthesizedProtocols.count(PDecl->getCanonicalDecl()))
return;
if (ObjCProtocolDecl *Def = PDecl->getDefinition())
PDecl = Def;
if (PDecl->instmeth_begin() != PDecl->instmeth_end()) {
unsigned NumMethods = std::distance(PDecl->instmeth_begin(),
PDecl->instmeth_end());
/* struct _objc_protocol_method_list {
int protocol_method_count;
struct protocol_methods protocols[];
}
*/
Result += "\nstatic struct {\n";
Result += "\tint protocol_method_count;\n";
Result += "\tstruct _protocol_methods protocol_methods[";
Result += utostr(NumMethods);
Result += "];\n} _OBJC_PROTOCOL_INSTANCE_METHODS_";
Result += PDecl->getNameAsString();
Result += " __attribute__ ((used, section (\"__OBJC, __cat_inst_meth\")))= "
"{\n\t" + utostr(NumMethods) + "\n";
// Output instance methods declared in this protocol.
for (ObjCProtocolDecl::instmeth_iterator
I = PDecl->instmeth_begin(), E = PDecl->instmeth_end();
I != E; ++I) {
if (I == PDecl->instmeth_begin())
Result += "\t ,{{(struct objc_selector *)\"";
else
Result += "\t ,{(struct objc_selector *)\"";
Result += (*I)->getSelector().getAsString();
std::string MethodTypeString;
Context->getObjCEncodingForMethodDecl((*I), MethodTypeString);
Result += "\", \"";
Result += MethodTypeString;
Result += "\"}\n";
}
Result += "\t }\n};\n";
}
// Output class methods declared in this protocol.
unsigned NumMethods = std::distance(PDecl->classmeth_begin(),
PDecl->classmeth_end());
if (NumMethods > 0) {
/* struct _objc_protocol_method_list {
int protocol_method_count;
struct protocol_methods protocols[];
}
*/
Result += "\nstatic struct {\n";
Result += "\tint protocol_method_count;\n";
Result += "\tstruct _protocol_methods protocol_methods[";
Result += utostr(NumMethods);
Result += "];\n} _OBJC_PROTOCOL_CLASS_METHODS_";
Result += PDecl->getNameAsString();
Result += " __attribute__ ((used, section (\"__OBJC, __cat_cls_meth\")))= "
"{\n\t";
Result += utostr(NumMethods);
Result += "\n";
    // Output class methods declared in this protocol.
for (ObjCProtocolDecl::classmeth_iterator
I = PDecl->classmeth_begin(), E = PDecl->classmeth_end();
I != E; ++I) {
if (I == PDecl->classmeth_begin())
Result += "\t ,{{(struct objc_selector *)\"";
else
Result += "\t ,{(struct objc_selector *)\"";
Result += (*I)->getSelector().getAsString();
std::string MethodTypeString;
Context->getObjCEncodingForMethodDecl((*I), MethodTypeString);
Result += "\", \"";
Result += MethodTypeString;
Result += "\"}\n";
}
Result += "\t }\n};\n";
}
// Output:
/* struct _objc_protocol {
// Objective-C 1.0 extensions
struct _objc_protocol_extension *isa;
char *protocol_name;
struct _objc_protocol **protocol_list;
struct _objc_protocol_method_list *instance_methods;
struct _objc_protocol_method_list *class_methods;
};
*/
static bool objc_protocol = false;
if (!objc_protocol) {
Result += "\nstruct _objc_protocol {\n";
Result += "\tstruct _objc_protocol_extension *isa;\n";
Result += "\tchar *protocol_name;\n";
Result += "\tstruct _objc_protocol **protocol_list;\n";
Result += "\tstruct _objc_protocol_method_list *instance_methods;\n";
Result += "\tstruct _objc_protocol_method_list *class_methods;\n";
Result += "};\n";
objc_protocol = true;
}
Result += "\nstatic struct _objc_protocol _OBJC_PROTOCOL_";
Result += PDecl->getNameAsString();
Result += " __attribute__ ((used, section (\"__OBJC, __protocol\")))= "
"{\n\t0, \"";
Result += PDecl->getNameAsString();
Result += "\", 0, ";
if (PDecl->instmeth_begin() != PDecl->instmeth_end()) {
Result += "(struct _objc_protocol_method_list *)&_OBJC_PROTOCOL_INSTANCE_METHODS_";
Result += PDecl->getNameAsString();
Result += ", ";
}
else
Result += "0, ";
if (PDecl->classmeth_begin() != PDecl->classmeth_end()) {
Result += "(struct _objc_protocol_method_list *)&_OBJC_PROTOCOL_CLASS_METHODS_";
Result += PDecl->getNameAsString();
Result += "\n";
}
else
Result += "0\n";
Result += "};\n";
// Mark this protocol as having been generated.
if (!ObjCSynthesizedProtocols.insert(PDecl->getCanonicalDecl()).second)
llvm_unreachable("protocol already synthesized");
}
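// For illustration (hypothetical protocol): `@protocol P -(void)ping; @end`
// used in a @protocol(P) expression emits roughly
//   static struct _objc_protocol _OBJC_PROTOCOL_P ... = { 0, "P", 0,
//     (struct _objc_protocol_method_list *)&_OBJC_PROTOCOL_INSTANCE_METHODS_P,
//     0 };
// in the __OBJC,__protocol section.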
void RewriteObjCFragileABI::RewriteObjCProtocolListMetaData(
const ObjCList<ObjCProtocolDecl> &Protocols,
StringRef prefix, StringRef ClassName,
std::string &Result) {
if (Protocols.empty()) return;
for (unsigned i = 0; i != Protocols.size(); i++)
RewriteObjCProtocolMetaData(Protocols[i], prefix, ClassName, Result);
  // Output the top-level protocol meta-data for the class.
/* struct _objc_protocol_list {
struct _objc_protocol_list *next;
int protocol_count;
struct _objc_protocol *class_protocols[];
}
*/
Result += "\nstatic struct {\n";
Result += "\tstruct _objc_protocol_list *next;\n";
Result += "\tint protocol_count;\n";
Result += "\tstruct _objc_protocol *class_protocols[";
Result += utostr(Protocols.size());
Result += "];\n} _OBJC_";
Result += prefix;
Result += "_PROTOCOLS_";
Result += ClassName;
Result += " __attribute__ ((used, section (\"__OBJC, __cat_cls_meth\")))= "
"{\n\t0, ";
Result += utostr(Protocols.size());
Result += "\n";
Result += "\t,{&_OBJC_PROTOCOL_";
Result += Protocols[0]->getNameAsString();
Result += " \n";
for (unsigned i = 1; i != Protocols.size(); i++) {
Result += "\t ,&_OBJC_PROTOCOL_";
Result += Protocols[i]->getNameAsString();
Result += "\n";
}
Result += "\t }\n};\n";
}
void RewriteObjCFragileABI::RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
std::string &Result) {
ObjCInterfaceDecl *CDecl = IDecl->getClassInterface();
// Explicitly declared @interface's are already synthesized.
if (CDecl->isImplicitInterfaceDecl()) {
// FIXME: Implementation of a class with no @interface (legacy) does not
// produce correct synthesis as yet.
RewriteObjCInternalStruct(CDecl, Result);
}
  // Build _objc_ivar_list metadata for the class's ivars if needed.
unsigned NumIvars = !IDecl->ivar_empty()
? IDecl->ivar_size()
: (CDecl ? CDecl->ivar_size() : 0);
if (NumIvars > 0) {
static bool objc_ivar = false;
if (!objc_ivar) {
/* struct _objc_ivar {
char *ivar_name;
char *ivar_type;
int ivar_offset;
};
*/
Result += "\nstruct _objc_ivar {\n";
Result += "\tchar *ivar_name;\n";
Result += "\tchar *ivar_type;\n";
Result += "\tint ivar_offset;\n";
Result += "};\n";
objc_ivar = true;
}
/* struct {
int ivar_count;
struct _objc_ivar ivar_list[nIvars];
};
*/
Result += "\nstatic struct {\n";
Result += "\tint ivar_count;\n";
Result += "\tstruct _objc_ivar ivar_list[";
Result += utostr(NumIvars);
Result += "];\n} _OBJC_INSTANCE_VARIABLES_";
Result += IDecl->getNameAsString();
Result += " __attribute__ ((used, section (\"__OBJC, __instance_vars\")))= "
"{\n\t";
Result += utostr(NumIvars);
Result += "\n";
ObjCInterfaceDecl::ivar_iterator IVI, IVE;
SmallVector<ObjCIvarDecl *, 8> IVars;
if (!IDecl->ivar_empty()) {
for (auto *IV : IDecl->ivars())
IVars.push_back(IV);
IVI = IDecl->ivar_begin();
IVE = IDecl->ivar_end();
} else {
IVI = CDecl->ivar_begin();
IVE = CDecl->ivar_end();
}
Result += "\t,{{\"";
Result += IVI->getNameAsString();
Result += "\", \"";
std::string TmpString, StrEncoding;
Context->getObjCEncodingForType(IVI->getType(), TmpString, *IVI);
QuoteDoublequotes(TmpString, StrEncoding);
Result += StrEncoding;
Result += "\", ";
RewriteIvarOffsetComputation(*IVI, Result);
Result += "}\n";
for (++IVI; IVI != IVE; ++IVI) {
Result += "\t ,{\"";
Result += IVI->getNameAsString();
Result += "\", \"";
std::string TmpString, StrEncoding;
Context->getObjCEncodingForType(IVI->getType(), TmpString, *IVI);
QuoteDoublequotes(TmpString, StrEncoding);
Result += StrEncoding;
Result += "\", ";
RewriteIvarOffsetComputation(*IVI, Result);
Result += "}\n";
}
Result += "\t }\n};\n";
}
// Build _objc_method_list for class's instance methods if needed
SmallVector<ObjCMethodDecl *, 32> InstanceMethods(IDecl->instance_methods());
// If any of our property implementations have associated getters or
// setters, produce metadata for them as well.
for (const auto *Prop : IDecl->property_impls()) {
if (Prop->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
continue;
if (!Prop->getPropertyIvarDecl())
continue;
ObjCPropertyDecl *PD = Prop->getPropertyDecl();
if (!PD)
continue;
if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
if (!Getter->isDefined())
InstanceMethods.push_back(Getter);
if (PD->isReadOnly())
continue;
if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl())
if (!Setter->isDefined())
InstanceMethods.push_back(Setter);
}
RewriteObjCMethodsMetaData(InstanceMethods.begin(), InstanceMethods.end(),
true, "", IDecl->getName(), Result);
// Build _objc_method_list for class's class methods if needed
RewriteObjCMethodsMetaData(IDecl->classmeth_begin(), IDecl->classmeth_end(),
false, "", IDecl->getName(), Result);
// Protocols referenced in class declaration?
RewriteObjCProtocolListMetaData(CDecl->getReferencedProtocols(),
"CLASS", CDecl->getName(), Result);
// Declaration of class/meta-class metadata
/* struct _objc_class {
struct _objc_class *isa; // or const char *root_class_name when metadata
const char *super_class_name;
char *name;
long version;
long info;
long instance_size;
struct _objc_ivar_list *ivars;
struct _objc_method_list *methods;
struct objc_cache *cache;
struct objc_protocol_list *protocols;
const char *ivar_layout;
struct _objc_class_ext *ext;
};
*/
static bool objc_class = false;
if (!objc_class) {
Result += "\nstruct _objc_class {\n";
Result += "\tstruct _objc_class *isa;\n";
Result += "\tconst char *super_class_name;\n";
Result += "\tchar *name;\n";
Result += "\tlong version;\n";
Result += "\tlong info;\n";
Result += "\tlong instance_size;\n";
Result += "\tstruct _objc_ivar_list *ivars;\n";
Result += "\tstruct _objc_method_list *methods;\n";
Result += "\tstruct objc_cache *cache;\n";
Result += "\tstruct _objc_protocol_list *protocols;\n";
Result += "\tconst char *ivar_layout;\n";
Result += "\tstruct _objc_class_ext *ext;\n";
Result += "};\n";
objc_class = true;
}
// Meta-class metadata generation.
ObjCInterfaceDecl *RootClass = nullptr;
ObjCInterfaceDecl *SuperClass = CDecl->getSuperClass();
while (SuperClass) {
RootClass = SuperClass;
SuperClass = SuperClass->getSuperClass();
}
SuperClass = CDecl->getSuperClass();
Result += "\nstatic struct _objc_class _OBJC_METACLASS_";
Result += CDecl->getNameAsString();
Result += " __attribute__ ((used, section (\"__OBJC, __meta_class\")))= "
"{\n\t(struct _objc_class *)\"";
Result += (RootClass ? RootClass->getNameAsString() : CDecl->getNameAsString());
Result += "\"";
if (SuperClass) {
Result += ", \"";
Result += SuperClass->getNameAsString();
Result += "\", \"";
Result += CDecl->getNameAsString();
Result += "\"";
}
else {
Result += ", 0, \"";
Result += CDecl->getNameAsString();
Result += "\"";
}
// Set 'ivars' field for root class to 0. ObjC1 runtime does not use it.
// 'info' field is initialized to CLS_META(2) for metaclass
Result += ", 0,2, sizeof(struct _objc_class), 0";
if (IDecl->classmeth_begin() != IDecl->classmeth_end()) {
Result += "\n\t, (struct _objc_method_list *)&_OBJC_CLASS_METHODS_";
Result += IDecl->getNameAsString();
Result += "\n";
}
else
Result += ", 0\n";
if (CDecl->protocol_begin() != CDecl->protocol_end()) {
Result += "\t,0, (struct _objc_protocol_list *)&_OBJC_CLASS_PROTOCOLS_";
Result += CDecl->getNameAsString();
Result += ",0,0\n";
}
else
Result += "\t,0,0,0,0\n";
Result += "};\n";
  // Class metadata generation.
Result += "\nstatic struct _objc_class _OBJC_CLASS_";
Result += CDecl->getNameAsString();
Result += " __attribute__ ((used, section (\"__OBJC, __class\")))= "
"{\n\t&_OBJC_METACLASS_";
Result += CDecl->getNameAsString();
if (SuperClass) {
Result += ", \"";
Result += SuperClass->getNameAsString();
Result += "\", \"";
Result += CDecl->getNameAsString();
Result += "\"";
}
else {
Result += ", 0, \"";
Result += CDecl->getNameAsString();
Result += "\"";
}
// 'info' field is initialized to CLS_CLASS(1) for class
Result += ", 0,1";
if (!ObjCSynthesizedStructs.count(CDecl))
Result += ",0";
else {
    // The class's internal struct was synthesized, so its size is available.
Result += ",sizeof(struct ";
Result += CDecl->getNameAsString();
if (LangOpts.MicrosoftExt)
Result += "_IMPL";
Result += ")";
}
if (NumIvars > 0) {
Result += ", (struct _objc_ivar_list *)&_OBJC_INSTANCE_VARIABLES_";
Result += CDecl->getNameAsString();
Result += "\n\t";
}
else
Result += ",0";
if (IDecl->instmeth_begin() != IDecl->instmeth_end()) {
Result += ", (struct _objc_method_list *)&_OBJC_INSTANCE_METHODS_";
Result += CDecl->getNameAsString();
Result += ", 0\n\t";
}
else
Result += ",0,0";
if (CDecl->protocol_begin() != CDecl->protocol_end()) {
Result += ", (struct _objc_protocol_list*)&_OBJC_CLASS_PROTOCOLS_";
Result += CDecl->getNameAsString();
Result += ", 0,0\n";
}
else
Result += ",0,0,0\n";
Result += "};\n";
}
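// For illustration (hypothetical class): an @implementation of `Foo : NSObject`
// emits `_OBJC_METACLASS_Foo` into __OBJC,__meta_class and `_OBJC_CLASS_Foo`
// into __OBJC,__class; the class's isa slot points at the metaclass, and the
// metaclass's isa slot carries the root class name ("NSObject").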
void RewriteObjCFragileABI::RewriteMetaDataIntoBuffer(std::string &Result) {
int ClsDefCount = ClassImplementation.size();
int CatDefCount = CategoryImplementation.size();
// For each implemented class, write out all its meta data.
for (int i = 0; i < ClsDefCount; i++)
RewriteObjCClassMetaData(ClassImplementation[i], Result);
// For each implemented category, write out all its meta data.
for (int i = 0; i < CatDefCount; i++)
RewriteObjCCategoryImplDecl(CategoryImplementation[i], Result);
// Write objc_symtab metadata
/*
struct _objc_symtab
{
long sel_ref_cnt;
SEL *refs;
short cls_def_cnt;
short cat_def_cnt;
void *defs[cls_def_cnt + cat_def_cnt];
};
*/
Result += "\nstruct _objc_symtab {\n";
Result += "\tlong sel_ref_cnt;\n";
Result += "\tSEL *refs;\n";
Result += "\tshort cls_def_cnt;\n";
Result += "\tshort cat_def_cnt;\n";
Result += "\tvoid *defs[" + utostr(ClsDefCount + CatDefCount)+ "];\n";
Result += "};\n\n";
Result += "static struct _objc_symtab "
"_OBJC_SYMBOLS __attribute__((used, section (\"__OBJC, __symbols\")))= {\n";
Result += "\t0, 0, " + utostr(ClsDefCount)
+ ", " + utostr(CatDefCount) + "\n";
for (int i = 0; i < ClsDefCount; i++) {
Result += "\t,&_OBJC_CLASS_";
Result += ClassImplementation[i]->getNameAsString();
Result += "\n";
}
for (int i = 0; i < CatDefCount; i++) {
Result += "\t,&_OBJC_CATEGORY_";
Result += CategoryImplementation[i]->getClassInterface()->getNameAsString();
Result += "_";
Result += CategoryImplementation[i]->getNameAsString();
Result += "\n";
}
Result += "};\n\n";
// Write objc_module metadata
/*
struct _objc_module {
long version;
long size;
const char *name;
struct _objc_symtab *symtab;
}
*/
Result += "\nstruct _objc_module {\n";
Result += "\tlong version;\n";
Result += "\tlong size;\n";
Result += "\tconst char *name;\n";
Result += "\tstruct _objc_symtab *symtab;\n";
Result += "};\n\n";
Result += "static struct _objc_module "
"_OBJC_MODULES __attribute__ ((used, section (\"__OBJC, __module_info\")))= {\n";
Result += "\t" + utostr(OBJC_ABI_VERSION) +
", sizeof(struct _objc_module), \"\", &_OBJC_SYMBOLS\n";
Result += "};\n\n";
if (LangOpts.MicrosoftExt) {
if (ProtocolExprDecls.size()) {
Result += "#pragma section(\".objc_protocol$B\",long,read,write)\n";
Result += "#pragma data_seg(push, \".objc_protocol$B\")\n";
for (ObjCProtocolDecl *ProtDecl : ProtocolExprDecls) {
Result += "static struct _objc_protocol *_POINTER_OBJC_PROTOCOL_";
Result += ProtDecl->getNameAsString();
Result += " = &_OBJC_PROTOCOL_";
Result += ProtDecl->getNameAsString();
Result += ";\n";
}
Result += "#pragma data_seg(pop)\n\n";
}
Result += "#pragma section(\".objc_module_info$B\",long,read,write)\n";
Result += "#pragma data_seg(push, \".objc_module_info$B\")\n";
Result += "static struct _objc_module *_POINTER_OBJC_MODULES = ";
Result += "&_OBJC_MODULES;\n";
Result += "#pragma data_seg(pop)\n\n";
}
}
/// RewriteObjCCategoryImplDecl - Rewrite metadata for each category
/// implementation.
void RewriteObjCFragileABI::RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *IDecl,
std::string &Result) {
ObjCInterfaceDecl *ClassDecl = IDecl->getClassInterface();
// Find category declaration for this implementation.
ObjCCategoryDecl *CDecl
= ClassDecl->FindCategoryDeclaration(IDecl->getIdentifier());
std::string FullCategoryName = ClassDecl->getNameAsString();
FullCategoryName += '_';
FullCategoryName += IDecl->getNameAsString();
// Build _objc_method_list for class's instance methods if needed
SmallVector<ObjCMethodDecl *, 32> InstanceMethods(IDecl->instance_methods());
// If any of our property implementations have associated getters or
// setters, produce metadata for them as well.
for (const auto *Prop : IDecl->property_impls()) {
if (Prop->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
continue;
if (!Prop->getPropertyIvarDecl())
continue;
ObjCPropertyDecl *PD = Prop->getPropertyDecl();
if (!PD)
continue;
if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
InstanceMethods.push_back(Getter);
if (PD->isReadOnly())
continue;
if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl())
InstanceMethods.push_back(Setter);
}
RewriteObjCMethodsMetaData(InstanceMethods.begin(), InstanceMethods.end(),
true, "CATEGORY_", FullCategoryName.c_str(),
Result);
// Build _objc_method_list for class's class methods if needed
RewriteObjCMethodsMetaData(IDecl->classmeth_begin(), IDecl->classmeth_end(),
false, "CATEGORY_", FullCategoryName.c_str(),
Result);
// Protocols referenced in class declaration?
  // A null CDecl is the case of a category implementation with no matching
  // category interface.
if (CDecl)
RewriteObjCProtocolListMetaData(CDecl->getReferencedProtocols(), "CATEGORY",
FullCategoryName, Result);
/* struct _objc_category {
char *category_name;
char *class_name;
struct _objc_method_list *instance_methods;
struct _objc_method_list *class_methods;
struct _objc_protocol_list *protocols;
// Objective-C 1.0 extensions
uint32_t size; // sizeof (struct _objc_category)
struct _objc_property_list *instance_properties; // category's own
// @property decl.
};
*/
static bool objc_category = false;
if (!objc_category) {
Result += "\nstruct _objc_category {\n";
Result += "\tchar *category_name;\n";
Result += "\tchar *class_name;\n";
Result += "\tstruct _objc_method_list *instance_methods;\n";
Result += "\tstruct _objc_method_list *class_methods;\n";
Result += "\tstruct _objc_protocol_list *protocols;\n";
Result += "\tunsigned int size;\n";
Result += "\tstruct _objc_property_list *instance_properties;\n";
Result += "};\n";
objc_category = true;
}
Result += "\nstatic struct _objc_category _OBJC_CATEGORY_";
Result += FullCategoryName;
Result += " __attribute__ ((used, section (\"__OBJC, __category\")))= {\n\t\"";
Result += IDecl->getNameAsString();
Result += "\"\n\t, \"";
Result += ClassDecl->getNameAsString();
Result += "\"\n";
if (IDecl->instmeth_begin() != IDecl->instmeth_end()) {
Result += "\t, (struct _objc_method_list *)"
"&_OBJC_CATEGORY_INSTANCE_METHODS_";
Result += FullCategoryName;
Result += "\n";
}
else
Result += "\t, 0\n";
if (IDecl->classmeth_begin() != IDecl->classmeth_end()) {
Result += "\t, (struct _objc_method_list *)"
"&_OBJC_CATEGORY_CLASS_METHODS_";
Result += FullCategoryName;
Result += "\n";
}
else
Result += "\t, 0\n";
if (CDecl && CDecl->protocol_begin() != CDecl->protocol_end()) {
Result += "\t, (struct _objc_protocol_list *)&_OBJC_CATEGORY_PROTOCOLS_";
Result += FullCategoryName;
Result += "\n";
}
else
Result += "\t, 0\n";
Result += "\t, sizeof(struct _objc_category), 0\n};\n";
}
/// RewriteObjCMethodsMetaData - Rewrite methods metadata for instance or
/// class methods.
template<typename MethodIterator>
void RewriteObjCFragileABI::RewriteObjCMethodsMetaData(MethodIterator MethodBegin,
MethodIterator MethodEnd,
bool IsInstanceMethod,
StringRef prefix,
StringRef ClassName,
std::string &Result) {
if (MethodBegin == MethodEnd) return;
if (!objc_impl_method) {
/* struct _objc_method {
SEL _cmd;
char *method_types;
void *_imp;
}
*/
Result += "\nstruct _objc_method {\n";
Result += "\tSEL _cmd;\n";
Result += "\tchar *method_types;\n";
Result += "\tvoid *_imp;\n";
Result += "};\n";
objc_impl_method = true;
}
// Build _objc_method_list for class's methods if needed
/* struct {
struct _objc_method_list *next_method;
int method_count;
struct _objc_method method_list[];
}
*/
unsigned NumMethods = std::distance(MethodBegin, MethodEnd);
Result += "\nstatic struct {\n";
Result += "\tstruct _objc_method_list *next_method;\n";
Result += "\tint method_count;\n";
Result += "\tstruct _objc_method method_list[";
Result += utostr(NumMethods);
Result += "];\n} _OBJC_";
Result += prefix;
Result += IsInstanceMethod ? "INSTANCE" : "CLASS";
Result += "_METHODS_";
Result += ClassName;
Result += " __attribute__ ((used, section (\"__OBJC, __";
Result += IsInstanceMethod ? "inst" : "cls";
Result += "_meth\")))= ";
Result += "{\n\t0, " + utostr(NumMethods) + "\n";
Result += "\t,{{(SEL)\"";
Result += (*MethodBegin)->getSelector().getAsString().c_str();
std::string MethodTypeString;
Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString);
Result += "\", \"";
Result += MethodTypeString;
Result += "\", (void *)";
Result += MethodInternalNames[*MethodBegin];
Result += "}\n";
for (++MethodBegin; MethodBegin != MethodEnd; ++MethodBegin) {
Result += "\t ,{(SEL)\"";
Result += (*MethodBegin)->getSelector().getAsString().c_str();
std::string MethodTypeString;
Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString);
Result += "\", \"";
Result += MethodTypeString;
Result += "\", (void *)";
Result += MethodInternalNames[*MethodBegin];
Result += "}\n";
}
Result += "\t }\n};\n";
}
Stmt *RewriteObjCFragileABI::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
SourceRange OldRange = IV->getSourceRange();
Expr *BaseExpr = IV->getBase();
// Rewrite the base, but without actually doing replaces.
{
DisableReplaceStmtScope S(*this);
BaseExpr = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(BaseExpr));
IV->setBase(BaseExpr);
}
ObjCIvarDecl *D = IV->getDecl();
Expr *Replacement = IV;
if (CurMethodDef) {
if (BaseExpr->getType()->isObjCObjectPointerType()) {
const ObjCInterfaceType *iFaceDecl =
dyn_cast<ObjCInterfaceType>(BaseExpr->getType()->getPointeeType());
assert(iFaceDecl && "RewriteObjCIvarRefExpr - iFaceDecl is null");
      // Look up which class implements the instance variable.
ObjCInterfaceDecl *clsDeclared = nullptr;
iFaceDecl->getDecl()->lookupInstanceVariable(D->getIdentifier(),
clsDeclared);
assert(clsDeclared && "RewriteObjCIvarRefExpr(): Can't find class");
// Synthesize an explicit cast to gain access to the ivar.
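      // e.g. a hypothetical `obj->_count` becomes
      // `((struct MyClass_IMPL *)obj)->_count`.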
std::string RecName = clsDeclared->getIdentifier()->getName();
RecName += "_IMPL";
IdentifierInfo *II = &Context->Idents.get(RecName);
RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
SourceLocation(), SourceLocation(),
II);
assert(RD && "RewriteObjCIvarRefExpr(): Can't find RecordDecl");
QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, castT,
CK_BitCast,
IV->getBase());
// Don't forget the parens to enforce the proper binding.
ParenExpr *PE = new (Context) ParenExpr(OldRange.getBegin(),
OldRange.getEnd(),
castExpr);
if (IV->isFreeIvar() &&
declaresSameEntity(CurMethodDef->getClassInterface(), iFaceDecl->getDecl())) {
MemberExpr *ME = new (Context)
MemberExpr(PE, true, SourceLocation(), D, IV->getLocation(),
D->getType(), VK_LValue, OK_Ordinary);
Replacement = ME;
} else {
IV->setBase(PE);
}
}
} else { // we are outside a method.
assert(!IV->isFreeIvar() && "Cannot have a free standing ivar outside a method");
// Explicit ivar refs need to have a cast inserted.
// FIXME: consider sharing some of this code with the code above.
if (BaseExpr->getType()->isObjCObjectPointerType()) {
const ObjCInterfaceType *iFaceDecl =
dyn_cast<ObjCInterfaceType>(BaseExpr->getType()->getPointeeType());
      // Look up which class implements the instance variable.
ObjCInterfaceDecl *clsDeclared = nullptr;
iFaceDecl->getDecl()->lookupInstanceVariable(D->getIdentifier(),
clsDeclared);
assert(clsDeclared && "RewriteObjCIvarRefExpr(): Can't find class");
// Synthesize an explicit cast to gain access to the ivar.
std::string RecName = clsDeclared->getIdentifier()->getName();
RecName += "_IMPL";
IdentifierInfo *II = &Context->Idents.get(RecName);
RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
SourceLocation(), SourceLocation(),
II);
assert(RD && "RewriteObjCIvarRefExpr(): Can't find RecordDecl");
QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, castT,
CK_BitCast,
IV->getBase());
// Don't forget the parens to enforce the proper binding.
ParenExpr *PE = new (Context) ParenExpr(IV->getBase()->getLocStart(),
IV->getBase()->getLocEnd(), castExpr);
// Cannot delete IV->getBase(), since PE points to it.
// Replace the old base with the cast. This is important when doing
// embedded rewrites. For example, [newInv->_container addObject:0].
IV->setBase(PE);
}
}
ReplaceStmtWithRange(IV, Replacement, OldRange);
return Replacement;
}
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/Frontend | repos/DirectXShaderCompiler/tools/clang/lib/Frontend/Rewrite/RewriteMacros.cpp | //===--- RewriteMacros.cpp - Rewrite macros into their expansions ---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This code rewrites macro invocations into their expansions. This gives you
// a macro expanded file that retains comments and #includes.
//
//===----------------------------------------------------------------------===//
#include "clang/Rewrite/Frontend/Rewriters.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Rewrite/Core/Rewriter.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdio>
#include <memory>
using namespace clang;
/// isSameToken - Return true if the two specified tokens have the same
/// content.
static bool isSameToken(Token &RawTok, Token &PPTok) {
// If two tokens have the same kind and the same identifier info, they are
// obviously the same.
if (PPTok.getKind() == RawTok.getKind() &&
PPTok.getIdentifierInfo() == RawTok.getIdentifierInfo())
return true;
// Otherwise, if they are different but have the same identifier info, they
// are also considered to be the same. This allows keywords and raw lexed
// identifiers with the same name to be treated the same.
if (PPTok.getIdentifierInfo() &&
PPTok.getIdentifierInfo() == RawTok.getIdentifierInfo())
return true;
return false;
}
/// GetNextRawTok - Return the next raw token in the stream, skipping over
/// comments if ReturnComment is false.
static const Token &GetNextRawTok(const std::vector<Token> &RawTokens,
unsigned &CurTok, bool ReturnComment) {
assert(CurTok < RawTokens.size() && "Overran eof!");
// If the client doesn't want comments and we have one, skip it.
if (!ReturnComment && RawTokens[CurTok].is(tok::comment))
++CurTok;
return RawTokens[CurTok++];
}
/// LexRawTokensFromMainFile - Lex all the raw tokens from the main file into
/// the specified vector.
static void LexRawTokensFromMainFile(Preprocessor &PP,
std::vector<Token> &RawTokens) {
SourceManager &SM = PP.getSourceManager();
// Create a lexer to lex all the tokens of the main file in raw mode. Even
// though it is in raw mode, it will not return comments.
const llvm::MemoryBuffer *FromFile = SM.getBuffer(SM.getMainFileID());
Lexer RawLex(SM.getMainFileID(), FromFile, SM, PP.getLangOpts());
// Switch on comment lexing because we really do want them.
RawLex.SetCommentRetentionState(true);
Token RawTok;
do {
RawLex.LexFromRawLexer(RawTok);
// If we have an identifier with no identifier info for our raw token, look
    // up the identifier info. This is important for equality comparison of
// identifier tokens.
if (RawTok.is(tok::raw_identifier))
PP.LookUpIdentifierInfo(RawTok);
RawTokens.push_back(RawTok);
} while (RawTok.isNot(tok::eof));
}
/// RewriteMacrosInInput - Implement -rewrite-macros mode.
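/// For illustration (hypothetical input): given
///   #define TWO 1+1
///   int x = TWO; // keep me
/// the rewritten output is, modulo whitespace,
///   int x = 1 + 1 /*TWO*/; // keep me
/// Directives themselves are preserved verbatim; macro uses are commented out
/// next to their inserted expansion.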
void clang::RewriteMacrosInInput(Preprocessor &PP, raw_ostream *OS) {
SourceManager &SM = PP.getSourceManager();
Rewriter Rewrite;
Rewrite.setSourceMgr(SM, PP.getLangOpts());
RewriteBuffer &RB = Rewrite.getEditBuffer(SM.getMainFileID());
std::vector<Token> RawTokens;
LexRawTokensFromMainFile(PP, RawTokens);
unsigned CurRawTok = 0;
Token RawTok = GetNextRawTok(RawTokens, CurRawTok, false);
// Get the first preprocessing token.
PP.EnterMainSourceFile();
Token PPTok;
PP.Lex(PPTok);
// Preprocess the input file in parallel with raw lexing the main file. Ignore
// all tokens that are preprocessed from a file other than the main file (e.g.
// a header). If we see tokens that are in the preprocessed file but not the
// lexed file, we have a macro expansion. If we see tokens in the lexed file
// that aren't in the preprocessed view, we have macros that expand to no
// tokens, or macro arguments etc.
while (RawTok.isNot(tok::eof) || PPTok.isNot(tok::eof)) {
SourceLocation PPLoc = SM.getExpansionLoc(PPTok.getLocation());
// If PPTok is from a different source file, ignore it.
if (!SM.isWrittenInMainFile(PPLoc)) {
PP.Lex(PPTok);
continue;
}
    // If the raw file hits a preprocessor directive, there will be extra tokens
    // in the raw file that don't exist in the preprocessed file. However, we
// choose to preserve them in the output file and otherwise handle them
// specially.
if (RawTok.is(tok::hash) && RawTok.isAtStartOfLine()) {
// If this is a #warning directive or #pragma mark (GNU extensions),
// comment the line out.
if (RawTokens[CurRawTok].is(tok::identifier)) {
const IdentifierInfo *II = RawTokens[CurRawTok].getIdentifierInfo();
if (II->getName() == "warning") {
// Comment out #warning.
RB.InsertTextAfter(SM.getFileOffset(RawTok.getLocation()), "//");
} else if (II->getName() == "pragma" &&
RawTokens[CurRawTok+1].is(tok::identifier) &&
(RawTokens[CurRawTok+1].getIdentifierInfo()->getName() ==
"mark")) {
// Comment out #pragma mark.
RB.InsertTextAfter(SM.getFileOffset(RawTok.getLocation()), "//");
}
}
// Otherwise, if this is a #include or some other directive, just leave it
// in the file by skipping over the line.
RawTok = GetNextRawTok(RawTokens, CurRawTok, false);
while (!RawTok.isAtStartOfLine() && RawTok.isNot(tok::eof))
RawTok = GetNextRawTok(RawTokens, CurRawTok, false);
continue;
}
// Okay, both tokens are from the same file. Get their offsets from the
// start of the file.
unsigned PPOffs = SM.getFileOffset(PPLoc);
unsigned RawOffs = SM.getFileOffset(RawTok.getLocation());
// If the offsets are the same and the token kind is the same, ignore them.
if (PPOffs == RawOffs && isSameToken(RawTok, PPTok)) {
RawTok = GetNextRawTok(RawTokens, CurRawTok, false);
PP.Lex(PPTok);
continue;
}
// If the PP token is farther along than the raw token, something was
// deleted. Comment out the raw token.
if (RawOffs <= PPOffs) {
// Comment out a whole run of tokens instead of bracketing each one with
// comments. Add a leading space if RawTok didn't have one.
bool HasSpace = RawTok.hasLeadingSpace();
RB.InsertTextAfter(RawOffs, &" /*"[HasSpace]);
unsigned EndPos;
do {
EndPos = RawOffs+RawTok.getLength();
RawTok = GetNextRawTok(RawTokens, CurRawTok, true);
RawOffs = SM.getFileOffset(RawTok.getLocation());
if (RawTok.is(tok::comment)) {
// Skip past the comment.
RawTok = GetNextRawTok(RawTokens, CurRawTok, false);
break;
}
} while (RawOffs <= PPOffs && !RawTok.isAtStartOfLine() &&
(PPOffs != RawOffs || !isSameToken(RawTok, PPTok)));
RB.InsertTextBefore(EndPos, "*/");
continue;
}
    // Otherwise, there was a replacement or an expansion. Insert the new token
// in the output buffer. Insert the whole run of new tokens at once to get
// them in the right order.
unsigned InsertPos = PPOffs;
std::string Expansion;
while (PPOffs < RawOffs) {
Expansion += ' ' + PP.getSpelling(PPTok);
PP.Lex(PPTok);
PPLoc = SM.getExpansionLoc(PPTok.getLocation());
PPOffs = SM.getFileOffset(PPLoc);
}
Expansion += ' ';
RB.InsertTextBefore(InsertPos, Expansion);
}
// Get the buffer corresponding to MainFileID. If we haven't changed it, then
// we are done.
if (const RewriteBuffer *RewriteBuf =
Rewrite.getRewriteBufferFor(SM.getMainFileID())) {
//printf("Changed:\n");
*OS << std::string(RewriteBuf->begin(), RewriteBuf->end());
} else {
fprintf(stderr, "No changes\n");
}
OS->flush();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/Frontend | repos/DirectXShaderCompiler/tools/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp | //===--- InclusionRewriter.cpp - Rewrite includes into their expansions ---===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This code rewrites include invocations into their expansions. This gives you
// a file with all included files merged into it.
//
//===----------------------------------------------------------------------===//
#include "clang/Rewrite/Frontend/Rewriters.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Frontend/PreprocessorOutputOptions.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Pragma.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace llvm;
namespace {
class InclusionRewriter : public PPCallbacks {
/// Information about which #includes were actually performed,
/// created by preprocessor callbacks.
struct IncludedFile {
FileID Id;
SrcMgr::CharacteristicKind FileType;
IncludedFile(FileID Id, SrcMgr::CharacteristicKind FileType)
: Id(Id), FileType(FileType) {}
};
Preprocessor &PP; ///< Used to find inclusion directives.
SourceManager &SM; ///< Used to read and manage source files.
raw_ostream &OS; ///< The destination stream for rewritten contents.
StringRef MainEOL; ///< The line ending marker to use.
const llvm::MemoryBuffer *PredefinesBuffer; ///< The preprocessor predefines.
bool ShowLineMarkers; ///< Show #line markers.
bool UseLineDirectives; ///< Use of line directives or line markers.
/// Tracks where inclusions that change the file are found.
std::map<unsigned, IncludedFile> FileIncludes;
/// Tracks where inclusions that import modules are found.
std::map<unsigned, const Module *> ModuleIncludes;
/// Used transitively for building up the FileIncludes mapping over the
/// various \c PPCallbacks callbacks.
SourceLocation LastInclusionLocation;
public:
InclusionRewriter(Preprocessor &PP, raw_ostream &OS, bool ShowLineMarkers,
bool UseLineDirectives);
bool Process(FileID FileId, SrcMgr::CharacteristicKind FileType);
void setPredefinesBuffer(const llvm::MemoryBuffer *Buf) {
PredefinesBuffer = Buf;
}
void detectMainFileEOL();
private:
void FileChanged(SourceLocation Loc, FileChangeReason Reason,
SrcMgr::CharacteristicKind FileType,
FileID PrevFID) override;
void FileSkipped(const FileEntry &SkippedFile, const Token &FilenameTok,
SrcMgr::CharacteristicKind FileType) override;
void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
StringRef FileName, bool IsAngled,
CharSourceRange FilenameRange, const FileEntry *File,
StringRef SearchPath, StringRef RelativePath,
const Module *Imported) override;
void WriteLineInfo(const char *Filename, int Line,
SrcMgr::CharacteristicKind FileType,
StringRef Extra = StringRef());
void WriteImplicitModuleImport(const Module *Mod);
void OutputContentUpTo(const MemoryBuffer &FromFile,
unsigned &WriteFrom, unsigned WriteTo,
StringRef EOL, int &lines,
bool EnsureNewline);
void CommentOutDirective(Lexer &DirectivesLex, const Token &StartToken,
const MemoryBuffer &FromFile, StringRef EOL,
unsigned &NextToWrite, int &Lines);
bool HandleHasInclude(FileID FileId, Lexer &RawLex,
const DirectoryLookup *Lookup, Token &Tok,
bool &FileExists);
const IncludedFile *FindIncludeAtLocation(SourceLocation Loc) const;
const Module *FindModuleAtLocation(SourceLocation Loc) const;
StringRef NextIdentifierName(Lexer &RawLex, Token &RawToken);
};
} // end anonymous namespace
/// Initializes an InclusionRewriter with a \p PP source and \p OS destination.
InclusionRewriter::InclusionRewriter(Preprocessor &PP, raw_ostream &OS,
bool ShowLineMarkers,
bool UseLineDirectives)
: PP(PP), SM(PP.getSourceManager()), OS(OS), MainEOL("\n"),
PredefinesBuffer(nullptr), ShowLineMarkers(ShowLineMarkers),
UseLineDirectives(UseLineDirectives),
LastInclusionLocation(SourceLocation()) {}
/// Write appropriate line information as either #line directives or GNU line
/// markers depending on what mode we're in, including the \p Filename and
/// \p Line we are located at, using the specified \p EOL line separator, and
/// any \p Extra context specifiers in GNU line directives.
void InclusionRewriter::WriteLineInfo(const char *Filename, int Line,
SrcMgr::CharacteristicKind FileType,
StringRef Extra) {
if (!ShowLineMarkers)
return;
if (UseLineDirectives) {
OS << "#line" << ' ' << Line << ' ' << '"';
OS.write_escaped(Filename);
OS << '"';
} else {
// Use GNU linemarkers as described here:
// http://gcc.gnu.org/onlinedocs/cpp/Preprocessor-Output.html
OS << '#' << ' ' << Line << ' ' << '"';
OS.write_escaped(Filename);
OS << '"';
if (!Extra.empty())
OS << Extra;
if (FileType == SrcMgr::C_System)
// "`3' This indicates that the following text comes from a system header
// file, so certain warnings should be suppressed."
OS << " 3";
else if (FileType == SrcMgr::C_ExternCSystem)
// as above for `3', plus "`4' This indicates that the following text
// should be treated as being wrapped in an implicit extern "C" block."
OS << " 3 4";
}
OS << MainEOL;
}
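// For example, entering a hypothetical system header emits the GNU marker
//   # 1 "/usr/include/stdio.h" 1 3
// or `#line 1 "/usr/include/stdio.h"` when line directives are requested.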
void InclusionRewriter::WriteImplicitModuleImport(const Module *Mod) {
OS << "@import " << Mod->getFullModuleName() << ";"
<< " /* clang -frewrite-includes: implicit import */" << MainEOL;
}
/// FileChanged - Whenever the preprocessor enters or exits a #include file
/// it invokes this handler.
void InclusionRewriter::FileChanged(SourceLocation Loc,
FileChangeReason Reason,
SrcMgr::CharacteristicKind NewFileType,
FileID) {
if (Reason != EnterFile)
return;
if (LastInclusionLocation.isInvalid())
    // We didn't reach this file (e.g. the main file) via an inclusion directive.
return;
FileID Id = FullSourceLoc(Loc, SM).getFileID();
auto P = FileIncludes.insert(std::make_pair(
LastInclusionLocation.getRawEncoding(), IncludedFile(Id, NewFileType)));
(void)P;
assert(P.second && "Unexpected revisitation of the same include directive");
LastInclusionLocation = SourceLocation();
}
/// Called whenever an inclusion is skipped due to canonical header protection
/// macros.
void InclusionRewriter::FileSkipped(const FileEntry &/*SkippedFile*/,
const Token &/*FilenameTok*/,
SrcMgr::CharacteristicKind /*FileType*/) {
assert(!LastInclusionLocation.isInvalid() &&
"A file, that wasn't found via an inclusion directive, was skipped");
LastInclusionLocation = SourceLocation();
}
/// This should be called whenever the preprocessor encounters include
/// directives. It does not say whether the file has been included, but it
/// provides more information about the directive (hash location instead
/// of location inside the included file). It is assumed that the matching
/// FileChanged() or FileSkipped() is called after this.
void InclusionRewriter::InclusionDirective(SourceLocation HashLoc,
const Token &/*IncludeTok*/,
StringRef /*FileName*/,
bool /*IsAngled*/,
CharSourceRange /*FilenameRange*/,
const FileEntry * /*File*/,
StringRef /*SearchPath*/,
StringRef /*RelativePath*/,
const Module *Imported) {
assert(LastInclusionLocation.isInvalid() &&
"Another inclusion directive was found before the previous one "
"was processed");
if (Imported) {
auto P = ModuleIncludes.insert(
std::make_pair(HashLoc.getRawEncoding(), Imported));
(void)P;
assert(P.second && "Unexpected revisitation of the same include directive");
} else
LastInclusionLocation = HashLoc;
}
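// Callback choreography: InclusionDirective() records the location of the
// hash, and the matching FileChanged(EnterFile) or FileSkipped() then consumes
// it, so (as the asserts check) at most one directive is in flight at a time.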
/// Simple lookup for a SourceLocation (specifically one denoting the hash in
/// an inclusion directive) in the map of inclusion information, FileIncludes.
const InclusionRewriter::IncludedFile *
InclusionRewriter::FindIncludeAtLocation(SourceLocation Loc) const {
const auto I = FileIncludes.find(Loc.getRawEncoding());
if (I != FileIncludes.end())
return &I->second;
return nullptr;
}
/// Simple lookup for a SourceLocation (specifically one denoting the hash in
/// an inclusion directive) in the map of module inclusion information.
const Module *
InclusionRewriter::FindModuleAtLocation(SourceLocation Loc) const {
const auto I = ModuleIncludes.find(Loc.getRawEncoding());
if (I != ModuleIncludes.end())
return I->second;
return nullptr;
}
/// Detect the likely line ending style of \p FromFile by examining the first
/// newline found within it.
static StringRef DetectEOL(const MemoryBuffer &FromFile) {
// Detect what line endings the file uses, so that added content does not mix
// the style. We need to check for "\r\n" first because "\n\r" will match
// "\r\n\r\n".
const char *Pos = strchr(FromFile.getBufferStart(), '\n');
if (!Pos)
return "\n";
if (Pos - 1 >= FromFile.getBufferStart() && Pos[-1] == '\r')
return "\r\n";
if (Pos + 1 < FromFile.getBufferEnd() && Pos[1] == '\r')
return "\n\r";
return "\n";
}
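// For example, a buffer starting with "int a;\r\nint b;" yields "\r\n", and a
// buffer containing no newline at all falls back to "\n".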
void InclusionRewriter::detectMainFileEOL() {
bool Invalid;
const MemoryBuffer &FromFile = *SM.getBuffer(SM.getMainFileID(), &Invalid);
assert(!Invalid);
if (Invalid)
return; // Should never happen, but whatever.
MainEOL = DetectEOL(FromFile);
}
/// Writes out bytes from \p FromFile, starting at \p NextToWrite and ending at
/// \p WriteTo - 1.
void InclusionRewriter::OutputContentUpTo(const MemoryBuffer &FromFile,
unsigned &WriteFrom, unsigned WriteTo,
StringRef LocalEOL, int &Line,
bool EnsureNewline) {
if (WriteTo <= WriteFrom)
return;
if (&FromFile == PredefinesBuffer) {
// Ignore the #defines of the predefines buffer.
WriteFrom = WriteTo;
return;
}
// If we would output half of a line ending, advance one character to output
// the whole line ending. All buffers are null terminated, so looking ahead
// one byte is safe.
if (LocalEOL.size() == 2 &&
LocalEOL[0] == (FromFile.getBufferStart() + WriteTo)[-1] &&
LocalEOL[1] == (FromFile.getBufferStart() + WriteTo)[0])
WriteTo++;
StringRef TextToWrite(FromFile.getBufferStart() + WriteFrom,
WriteTo - WriteFrom);
if (MainEOL == LocalEOL) {
OS << TextToWrite;
    // Count lines manually; it's faster than getPresumedLoc().
Line += TextToWrite.count(LocalEOL);
if (EnsureNewline && !TextToWrite.endswith(LocalEOL))
OS << MainEOL;
} else {
// Output the file one line at a time, rewriting the line endings as we go.
StringRef Rest = TextToWrite;
while (!Rest.empty()) {
StringRef LineText;
std::tie(LineText, Rest) = Rest.split(LocalEOL);
OS << LineText;
Line++;
if (!Rest.empty())
OS << MainEOL;
}
if (TextToWrite.endswith(LocalEOL) || EnsureNewline)
OS << MainEOL;
}
WriteFrom = WriteTo;
}
/// Print characters from \p FromFile starting at \p NextToWrite up until the
/// inclusion directive at \p StartToken, then print out the inclusion
/// directive disabled by a #if directive, updating \p NextToWrite and
/// \p Line to track the number of source lines visited and the progress
/// through the \p FromFile buffer.
void InclusionRewriter::CommentOutDirective(Lexer &DirectiveLex,
const Token &StartToken,
const MemoryBuffer &FromFile,
StringRef LocalEOL,
unsigned &NextToWrite, int &Line) {
OutputContentUpTo(FromFile, NextToWrite,
SM.getFileOffset(StartToken.getLocation()), LocalEOL, Line,
false);
Token DirectiveToken;
do {
DirectiveLex.LexFromRawLexer(DirectiveToken);
} while (!DirectiveToken.is(tok::eod) && DirectiveToken.isNot(tok::eof));
if (&FromFile == PredefinesBuffer) {
// OutputContentUpTo() would not output anything anyway.
return;
}
OS << "#if 0 /* expanded by -frewrite-includes */" << MainEOL;
OutputContentUpTo(FromFile, NextToWrite,
SM.getFileOffset(DirectiveToken.getLocation()) +
DirectiveToken.getLength(),
LocalEOL, Line, true);
OS << "#endif /* expanded by -frewrite-includes */" << MainEOL;
}
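// For illustration, a hypothetical `#include "a.h"` line is emitted as
//   #if 0 /* expanded by -frewrite-includes */
//   #include "a.h"
//   #endif /* expanded by -frewrite-includes */
// keeping the original directive visible in the output while disabling it.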
/// Find the next identifier in the pragma directive specified by \p RawToken.
StringRef InclusionRewriter::NextIdentifierName(Lexer &RawLex,
Token &RawToken) {
RawLex.LexFromRawLexer(RawToken);
if (RawToken.is(tok::raw_identifier))
PP.LookUpIdentifierInfo(RawToken);
if (RawToken.is(tok::identifier))
return RawToken.getIdentifierInfo()->getName();
return StringRef();
}
// Expand __has_include and __has_include_next if possible. If there's no
// definitive answer return false.
bool InclusionRewriter::HandleHasInclude(
FileID FileId, Lexer &RawLex, const DirectoryLookup *Lookup, Token &Tok,
bool &FileExists) {
// Lex the opening paren.
RawLex.LexFromRawLexer(Tok);
if (Tok.isNot(tok::l_paren))
return false;
RawLex.LexFromRawLexer(Tok);
SmallString<128> FilenameBuffer;
StringRef Filename;
// Since the raw lexer doesn't give us angle_literals we have to parse them
// ourselves.
// FIXME: What to do if the file name is a macro?
if (Tok.is(tok::less)) {
RawLex.LexFromRawLexer(Tok);
FilenameBuffer += '<';
do {
if (Tok.is(tok::eod)) // Sanity check.
return false;
if (Tok.is(tok::raw_identifier))
PP.LookUpIdentifierInfo(Tok);
// Get the string piece.
SmallVector<char, 128> TmpBuffer;
bool Invalid = false;
StringRef TmpName = PP.getSpelling(Tok, TmpBuffer, &Invalid);
if (Invalid)
return false;
FilenameBuffer += TmpName;
RawLex.LexFromRawLexer(Tok);
} while (Tok.isNot(tok::greater));
FilenameBuffer += '>';
Filename = FilenameBuffer;
} else {
if (Tok.isNot(tok::string_literal))
return false;
bool Invalid = false;
Filename = PP.getSpelling(Tok, FilenameBuffer, &Invalid);
if (Invalid)
return false;
}
// Lex the closing paren.
RawLex.LexFromRawLexer(Tok);
if (Tok.isNot(tok::r_paren))
return false;
// Now ask HeaderInfo if it knows about the header.
// FIXME: Subframeworks aren't handled here. Do we care?
bool isAngled = PP.GetIncludeFilenameSpelling(Tok.getLocation(), Filename);
const DirectoryLookup *CurDir;
const FileEntry *FileEnt = PP.getSourceManager().getFileEntryForID(FileId);
SmallVector<std::pair<const FileEntry *, const DirectoryEntry *>, 1>
Includers;
Includers.push_back(std::make_pair(FileEnt, FileEnt->getDir()));
const FileEntry *File = PP.getHeaderSearchInfo().LookupFile(
Filename, SourceLocation(), isAngled, nullptr, CurDir, Includers, nullptr,
nullptr, nullptr, false);
FileExists = File != nullptr;
return true;
}
/// Use a raw lexer to analyze \p FileId, incrementally copying parts of it
/// and including content of included files recursively.
bool InclusionRewriter::Process(FileID FileId,
SrcMgr::CharacteristicKind FileType)
{
bool Invalid;
const MemoryBuffer &FromFile = *SM.getBuffer(FileId, &Invalid);
assert(!Invalid && "Attempting to process invalid inclusion");
const char *FileName = FromFile.getBufferIdentifier();
Lexer RawLex(FileId, &FromFile, PP.getSourceManager(), PP.getLangOpts());
RawLex.SetCommentRetentionState(false);
StringRef LocalEOL = DetectEOL(FromFile);
// Per the GNU docs: "1" indicates entering a new file.
if (FileId == SM.getMainFileID() || FileId == PP.getPredefinesFileID())
WriteLineInfo(FileName, 1, FileType, "");
else
WriteLineInfo(FileName, 1, FileType, " 1");
if (SM.getFileIDSize(FileId) == 0)
return false;
// The next byte to be copied from the source file, which may be non-zero if
// the lexer handled a BOM.
unsigned NextToWrite = SM.getFileOffset(RawLex.getSourceLocation());
assert(SM.getLineNumber(FileId, NextToWrite) == 1);
int Line = 1; // The current input file line number.
Token RawToken;
RawLex.LexFromRawLexer(RawToken);
// TODO: Consider adding a switch that strips possibly unimportant content,
// such as comments, to reduce the size of repro files.
while (RawToken.isNot(tok::eof)) {
if (RawToken.is(tok::hash) && RawToken.isAtStartOfLine()) {
RawLex.setParsingPreprocessorDirective(true);
Token HashToken = RawToken;
RawLex.LexFromRawLexer(RawToken);
if (RawToken.is(tok::raw_identifier))
PP.LookUpIdentifierInfo(RawToken);
if (RawToken.getIdentifierInfo() != nullptr) {
switch (RawToken.getIdentifierInfo()->getPPKeywordID()) {
case tok::pp_include:
case tok::pp_include_next:
case tok::pp_import: {
CommentOutDirective(RawLex, HashToken, FromFile, LocalEOL, NextToWrite,
Line);
if (FileId != PP.getPredefinesFileID())
WriteLineInfo(FileName, Line - 1, FileType, "");
StringRef LineInfoExtra;
SourceLocation Loc = HashToken.getLocation();
if (const Module *Mod = FindModuleAtLocation(Loc))
WriteImplicitModuleImport(Mod);
else if (const IncludedFile *Inc = FindIncludeAtLocation(Loc)) {
// include and recursively process the file
if (Process(Inc->Id, Inc->FileType)) {
// and set lineinfo back to this file, if the nested one was
// actually included
// `2' indicates returning to a file (after having included
            // another file).
LineInfoExtra = " 2";
}
}
// fix up lineinfo (since commented out directive changed line
// numbers) for inclusions that were skipped due to header guards
WriteLineInfo(FileName, Line, FileType, LineInfoExtra);
break;
}
case tok::pp_pragma: {
StringRef Identifier = NextIdentifierName(RawLex, RawToken);
if (Identifier == "clang" || Identifier == "GCC") {
if (NextIdentifierName(RawLex, RawToken) == "system_header") {
// keep the directive in, commented out
CommentOutDirective(RawLex, HashToken, FromFile, LocalEOL,
NextToWrite, Line);
// update our own type
FileType = SM.getFileCharacteristic(RawToken.getLocation());
WriteLineInfo(FileName, Line, FileType);
}
} else if (Identifier == "once") {
// keep the directive in, commented out
CommentOutDirective(RawLex, HashToken, FromFile, LocalEOL,
NextToWrite, Line);
WriteLineInfo(FileName, Line, FileType);
}
break;
}
case tok::pp_if:
case tok::pp_elif: {
bool elif = (RawToken.getIdentifierInfo()->getPPKeywordID() ==
tok::pp_elif);
// Rewrite special builtin macros to avoid pulling in host details.
do {
// Walk over the directive.
RawLex.LexFromRawLexer(RawToken);
if (RawToken.is(tok::raw_identifier))
PP.LookUpIdentifierInfo(RawToken);
if (RawToken.is(tok::identifier)) {
bool HasFile;
SourceLocation Loc = RawToken.getLocation();
// Rewrite __has_include(x)
if (RawToken.getIdentifierInfo()->isStr("__has_include")) {
if (!HandleHasInclude(FileId, RawLex, nullptr, RawToken,
HasFile))
continue;
// Rewrite __has_include_next(x)
} else if (RawToken.getIdentifierInfo()->isStr(
"__has_include_next")) {
const DirectoryLookup *Lookup = PP.GetCurDirLookup();
if (Lookup)
++Lookup;
if (!HandleHasInclude(FileId, RawLex, Lookup, RawToken,
HasFile))
continue;
} else {
continue;
}
// Replace the macro with (0) or (1), followed by the commented
// out macro for reference.
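            // e.g. a hypothetical `__has_include(<stdio.h>)` becomes
            // `(1)/*__has_include(<stdio.h>)*/` when the header is found,
            // `(0)/*...*/` otherwise.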
OutputContentUpTo(FromFile, NextToWrite, SM.getFileOffset(Loc),
LocalEOL, Line, false);
OS << '(' << (int) HasFile << ")/*";
OutputContentUpTo(FromFile, NextToWrite,
SM.getFileOffset(RawToken.getLocation()) +
RawToken.getLength(),
LocalEOL, Line, false);
OS << "*/";
}
} while (RawToken.isNot(tok::eod));
if (elif) {
OutputContentUpTo(FromFile, NextToWrite,
SM.getFileOffset(RawToken.getLocation()) +
RawToken.getLength(),
LocalEOL, Line, /*EnsureNewline=*/ true);
WriteLineInfo(FileName, Line, FileType);
}
break;
}
case tok::pp_endif:
case tok::pp_else: {
// We surround every #include by #if 0 to comment it out, but that
// changes line numbers. These are fixed up right after that, but
// the whole #include could be inside a preprocessor conditional
// that is not processed. So it is necessary to fix the line
          // numbers on the next line after each #else/#endif as well.
RawLex.SetKeepWhitespaceMode(true);
do {
RawLex.LexFromRawLexer(RawToken);
} while (RawToken.isNot(tok::eod) && RawToken.isNot(tok::eof));
OutputContentUpTo(FromFile, NextToWrite,
SM.getFileOffset(RawToken.getLocation()) +
RawToken.getLength(),
LocalEOL, Line, /*EnsureNewline=*/ true);
WriteLineInfo(FileName, Line, FileType);
RawLex.SetKeepWhitespaceMode(false);
} break;
default:
break;
}
}
RawLex.setParsingPreprocessorDirective(false);
}
RawLex.LexFromRawLexer(RawToken);
}
OutputContentUpTo(FromFile, NextToWrite,
SM.getFileOffset(SM.getLocForEndOfFile(FileId)), LocalEOL,
Line, /*EnsureNewline=*/true);
return true;
}
/// RewriteIncludesInInput - Implement -frewrite-includes mode.
void clang::RewriteIncludesInInput(Preprocessor &PP, raw_ostream *OS,
const PreprocessorOutputOptions &Opts) {
SourceManager &SM = PP.getSourceManager();
InclusionRewriter *Rewrite = new InclusionRewriter(
PP, *OS, Opts.ShowLineMarkers, Opts.UseLineDirectives);
Rewrite->detectMainFileEOL();
PP.addPPCallbacks(std::unique_ptr<PPCallbacks>(Rewrite));
PP.IgnorePragmas();
// First let the preprocessor process the entire file and call callbacks.
  // Callbacks will record which #includes were actually performed.
PP.EnterMainSourceFile();
Token Tok;
// Only preprocessor directives matter here, so disable macro expansion
// everywhere else as an optimization.
// TODO: It would be even faster if the preprocessor could be switched
  // to a mode where it would parse only preprocessor directives and comments;
  // nothing else matters for parsing or processing here.
PP.SetMacroExpansionOnlyInDirectives();
do {
PP.Lex(Tok);
} while (Tok.isNot(tok::eof));
Rewrite->setPredefinesBuffer(SM.getBuffer(PP.getPredefinesFileID()));
Rewrite->Process(PP.getPredefinesFileID(), SrcMgr::C_User);
Rewrite->Process(SM.getMainFileID(), SrcMgr::C_User);
OS->flush();
}
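// A minimal sketch (not built; the CompilerInstance wiring is assumed) of how
// a frontend action might drive the rewriter above for -frewrite-includes:
#if 0
static void rewriteIncludesExample(clang::CompilerInstance &CI) {
  clang::PreprocessorOutputOptions Opts;
  Opts.ShowLineMarkers = 1;   // keep "# <line>" markers in the output
  Opts.UseLineDirectives = 0; // GNU-style markers rather than #line
  // Both passes (callback recording and Process()) happen inside.
  clang::RewriteIncludesInInput(CI.getPreprocessor(), &llvm::outs(), Opts);
}
#endif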
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/Frontend | repos/DirectXShaderCompiler/tools/clang/lib/Frontend/Rewrite/CMakeLists.txt | set(LLVM_OPTIONAL_SOURCES RewriteModernObjC.cpp) # HLSL Change - ignore file
set(LLVM_LINK_COMPONENTS
Support
)
add_clang_library(clangRewriteFrontend
FixItRewriter.cpp
FrontendActions_rewrite.cpp
HTMLPrint.cpp
InclusionRewriter.cpp
RewriteMacros.cpp
# RewriteModernObjC.cpp # HLSL Change
RewriteObjC.cpp
RewriteTest.cpp
LINK_LIBS
clangAST
clangBasic
clangEdit
clangFrontend
clangLex
clangRewrite
)
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/Frontend | repos/DirectXShaderCompiler/tools/clang/lib/Frontend/Rewrite/RewriteTest.cpp | //===--- RewriteTest.cpp - Rewriter playground ----------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is a testbed.
//
//===----------------------------------------------------------------------===//
#include "clang/Rewrite/Frontend/Rewriters.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Rewrite/Core/TokenRewriter.h"
#include "llvm/Support/raw_ostream.h"
void clang::DoRewriteTest(Preprocessor &PP, raw_ostream* OS) {
SourceManager &SM = PP.getSourceManager();
const LangOptions &LangOpts = PP.getLangOpts();
TokenRewriter Rewriter(SM.getMainFileID(), SM, LangOpts);
// Throw <i> </i> tags around comments.
for (TokenRewriter::token_iterator I = Rewriter.token_begin(),
E = Rewriter.token_end(); I != E; ++I) {
if (I->isNot(tok::comment)) continue;
Rewriter.AddTokenBefore(I, "<i>");
Rewriter.AddTokenAfter(I, "</i>");
}
// Print out the output.
for (TokenRewriter::token_iterator I = Rewriter.token_begin(),
E = Rewriter.token_end(); I != E; ++I)
*OS << PP.getSpelling(*I);
}
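// Worked example (illustrative; exact whitespace handling aside): given the
// input
//   int x; // counter
// the loops above yield roughly
//   int x; <i>// counter</i>
// since each tok::comment token is bracketed by the inserted "<i>"/"</i>"
// tokens before the whole stream is re-spelled to the output.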
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/Frontend | repos/DirectXShaderCompiler/tools/clang/lib/Frontend/Rewrite/FrontendActions_rewrite.cpp | //===--- FrontendActions_rewrite.cpp ---------------------------------------===//
///////////////////////////////////////////////////////////////////////////////
// //
// FrontendActions_rewrite.cpp //
// Copyright (C) Microsoft Corporation. All rights reserved. //
// This file is distributed under the University of Illinois Open Source //
// License. See LICENSE.TXT for details. //
// //
///////////////////////////////////////////////////////////////////////////////
#include "clang/AST/ASTConsumer.h"
#include "clang/Basic/FileManager.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendActions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/Utils.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Parse/Parser.h"
#include "clang/Rewrite/Frontend/ASTConsumers.h"
#include "clang/Rewrite/Frontend/FixItRewriter.h"
#include "clang/Rewrite/Frontend/FrontendActions.h"
#include "clang/Rewrite/Frontend/Rewriters.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
using namespace clang;
//===----------------------------------------------------------------------===//
// AST Consumer Actions
//===----------------------------------------------------------------------===//
std::unique_ptr<ASTConsumer>
HTMLPrintAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
if (raw_ostream *OS = CI.createDefaultOutputFile(false, InFile))
return CreateHTMLPrinter(OS, CI.getPreprocessor());
return nullptr;
}
FixItAction::FixItAction() {}
FixItAction::~FixItAction() {}
std::unique_ptr<ASTConsumer>
FixItAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
return llvm::make_unique<ASTConsumer>();
}
namespace {
class FixItRewriteInPlace : public FixItOptions {
public:
FixItRewriteInPlace() { InPlace = true; }
std::string RewriteFilename(const std::string &Filename, int &fd) override {
llvm_unreachable("don't call RewriteFilename for inplace rewrites");
}
};
class FixItActionSuffixInserter : public FixItOptions {
std::string NewSuffix;
public:
FixItActionSuffixInserter(std::string NewSuffix, bool FixWhatYouCan)
: NewSuffix(NewSuffix) {
this->FixWhatYouCan = FixWhatYouCan;
}
std::string RewriteFilename(const std::string &Filename, int &fd) override {
fd = -1;
SmallString<128> Path(Filename);
llvm::sys::path::replace_extension(
Path, NewSuffix + llvm::sys::path::extension(Path));
return Path.str();
}
};
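// Illustrative example: with NewSuffix == "fixed", an input of "foo.cpp" is
// rewritten to "foo.fixed.cpp" -- replace_extension() strips the old ".cpp"
// and prepends a '.' to the new extension string "fixed.cpp".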
class FixItRewriteToTemp : public FixItOptions {
public:
std::string RewriteFilename(const std::string &Filename, int &fd) override {
SmallString<128> Path;
llvm::sys::fs::createTemporaryFile(llvm::sys::path::filename(Filename),
llvm::sys::path::extension(Filename), fd,
Path);
return Path.str();
}
};
} // end anonymous namespace
bool FixItAction::BeginSourceFileAction(CompilerInstance &CI,
StringRef Filename) {
const FrontendOptions &FEOpts = getCompilerInstance().getFrontendOpts();
if (!FEOpts.FixItSuffix.empty()) {
FixItOpts.reset(new FixItActionSuffixInserter(FEOpts.FixItSuffix,
FEOpts.FixWhatYouCan));
} else {
FixItOpts.reset(new FixItRewriteInPlace);
FixItOpts->FixWhatYouCan = FEOpts.FixWhatYouCan;
}
Rewriter.reset(new FixItRewriter(CI.getDiagnostics(), CI.getSourceManager(),
CI.getLangOpts(), FixItOpts.get()));
return true;
}
void FixItAction::EndSourceFileAction() {
  // Rewrite all files for which fix-its were recorded.
Rewriter->WriteFixedFiles();
}
bool FixItRecompile::BeginInvocation(CompilerInstance &CI) {
std::vector<std::pair<std::string, std::string>> RewrittenFiles;
bool err = false;
{
const FrontendOptions &FEOpts = CI.getFrontendOpts();
std::unique_ptr<FrontendAction> FixAction(new SyntaxOnlyAction());
if (FixAction->BeginSourceFile(CI, FEOpts.Inputs[0])) {
std::unique_ptr<FixItOptions> FixItOpts;
if (FEOpts.FixToTemporaries)
FixItOpts.reset(new FixItRewriteToTemp());
else
FixItOpts.reset(new FixItRewriteInPlace());
FixItOpts->Silent = true;
FixItOpts->FixWhatYouCan = FEOpts.FixWhatYouCan;
FixItOpts->FixOnlyWarnings = FEOpts.FixOnlyWarnings;
FixItRewriter Rewriter(CI.getDiagnostics(), CI.getSourceManager(),
CI.getLangOpts(), FixItOpts.get());
FixAction->Execute();
err = Rewriter.WriteFixedFiles(&RewrittenFiles);
FixAction->EndSourceFile();
CI.setSourceManager(nullptr);
CI.setFileManager(nullptr);
} else {
err = true;
}
}
if (err)
return false;
CI.getDiagnosticClient().clear();
CI.getDiagnostics().Reset();
PreprocessorOptions &PPOpts = CI.getPreprocessorOpts();
PPOpts.RemappedFiles.insert(PPOpts.RemappedFiles.end(),
RewrittenFiles.begin(), RewrittenFiles.end());
PPOpts.RemappedFilesKeepOriginalName = false;
return true;
}
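// In outline, the recompile flow above is:
//   1. run a throwaway SyntaxOnlyAction with a FixItRewriter attached,
//   2. write the fixed sources (in place or to temporary files),
//   3. remap the original file names to the fixed copies via
//      PPOpts.RemappedFiles, and
//   4. let the wrapped action re-run against the remapped buffers.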
#ifdef CLANG_ENABLE_OBJC_REWRITER
std::unique_ptr<ASTConsumer>
RewriteObjCAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
if (raw_ostream *OS = CI.createDefaultOutputFile(false, InFile, "cpp")) {
if (CI.getLangOpts().ObjCRuntime.isNonFragile())
return CreateModernObjCRewriter(
InFile, OS, CI.getDiagnostics(), CI.getLangOpts(),
CI.getDiagnosticOpts().NoRewriteMacros,
(CI.getCodeGenOpts().getDebugInfo() != CodeGenOptions::NoDebugInfo));
return CreateObjCRewriter(InFile, OS, CI.getDiagnostics(), CI.getLangOpts(),
CI.getDiagnosticOpts().NoRewriteMacros);
}
return nullptr;
}
#endif
//===----------------------------------------------------------------------===//
// Preprocessor Actions
//===----------------------------------------------------------------------===//
void RewriteMacrosAction::ExecuteAction() {
CompilerInstance &CI = getCompilerInstance();
raw_ostream *OS = CI.createDefaultOutputFile(true, getCurrentFile());
if (!OS)
return;
RewriteMacrosInInput(CI.getPreprocessor(), OS);
}
void RewriteTestAction::ExecuteAction() {
CompilerInstance &CI = getCompilerInstance();
raw_ostream *OS = CI.createDefaultOutputFile(false, getCurrentFile());
if (!OS)
return;
DoRewriteTest(CI.getPreprocessor(), OS);
}
void RewriteIncludesAction::ExecuteAction() {
CompilerInstance &CI = getCompilerInstance();
raw_ostream *OS = CI.createDefaultOutputFile(true, getCurrentFile());
if (!OS)
return;
RewriteIncludesInInput(CI.getPreprocessor(), OS,
CI.getPreprocessorOutputOpts());
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/Frontend | repos/DirectXShaderCompiler/tools/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp | //===--- RewriteModernObjC.cpp - Playground for the code rewriter ---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Hacks and fun related to the code rewriter.
//
//===----------------------------------------------------------------------===//
#include "clang/Rewrite/Frontend/ASTConsumers.h"
#include "clang/AST/AST.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/Attr.h"
#include "clang/AST/ParentMap.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Lexer.h"
#include "clang/Rewrite/Core/Rewriter.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
#ifdef CLANG_ENABLE_OBJC_REWRITER
using namespace clang;
using llvm::utostr;
namespace {
class RewriteModernObjC : public ASTConsumer {
protected:
enum {
BLOCK_FIELD_IS_OBJECT = 3, /* id, NSObject, __attribute__((NSObject)),
block, ... */
BLOCK_FIELD_IS_BLOCK = 7, /* a block variable */
BLOCK_FIELD_IS_BYREF = 8, /* the on stack structure holding the
__block variable */
BLOCK_FIELD_IS_WEAK = 16, /* declared __weak, only used in byref copy
helpers */
BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose
support routines */
BLOCK_BYREF_CURRENT_MAX = 256
};
enum {
BLOCK_NEEDS_FREE = (1 << 24),
BLOCK_HAS_COPY_DISPOSE = (1 << 25),
BLOCK_HAS_CXX_OBJ = (1 << 26),
BLOCK_IS_GC = (1 << 27),
BLOCK_IS_GLOBAL = (1 << 28),
BLOCK_HAS_DESCRIPTOR = (1 << 29)
};
Rewriter Rewrite;
DiagnosticsEngine &Diags;
const LangOptions &LangOpts;
ASTContext *Context;
SourceManager *SM;
TranslationUnitDecl *TUDecl;
FileID MainFileID;
const char *MainFileStart, *MainFileEnd;
Stmt *CurrentBody;
ParentMap *PropParentMap; // created lazily.
std::string InFileName;
raw_ostream* OutFile;
std::string Preamble;
TypeDecl *ProtocolTypeDecl;
VarDecl *GlobalVarDecl;
Expr *GlobalConstructionExp;
unsigned RewriteFailedDiag;
unsigned GlobalBlockRewriteFailedDiag;
// ObjC string constant support.
unsigned NumObjCStringLiterals;
VarDecl *ConstantStringClassReference;
RecordDecl *NSStringRecord;
// ObjC foreach break/continue generation support.
int BcLabelCount;
unsigned TryFinallyContainsReturnDiag;
// Needed for super.
ObjCMethodDecl *CurMethodDef;
RecordDecl *SuperStructDecl;
RecordDecl *ConstantStringDecl;
FunctionDecl *MsgSendFunctionDecl;
FunctionDecl *MsgSendSuperFunctionDecl;
FunctionDecl *MsgSendStretFunctionDecl;
FunctionDecl *MsgSendSuperStretFunctionDecl;
FunctionDecl *MsgSendFpretFunctionDecl;
FunctionDecl *GetClassFunctionDecl;
FunctionDecl *GetMetaClassFunctionDecl;
FunctionDecl *GetSuperClassFunctionDecl;
FunctionDecl *SelGetUidFunctionDecl;
FunctionDecl *CFStringFunctionDecl;
FunctionDecl *SuperConstructorFunctionDecl;
FunctionDecl *CurFunctionDef;
/* Misc. containers needed for meta-data rewrite. */
SmallVector<ObjCImplementationDecl *, 8> ClassImplementation;
SmallVector<ObjCCategoryImplDecl *, 8> CategoryImplementation;
llvm::SmallPtrSet<ObjCInterfaceDecl*, 8> ObjCSynthesizedStructs;
llvm::SmallPtrSet<ObjCProtocolDecl*, 8> ObjCSynthesizedProtocols;
llvm::SmallPtrSet<ObjCInterfaceDecl*, 8> ObjCWrittenInterfaces;
llvm::SmallPtrSet<TagDecl*, 32> GlobalDefinedTags;
SmallVector<ObjCInterfaceDecl*, 32> ObjCInterfacesSeen;
/// DefinedNonLazyClasses - List of defined "non-lazy" classes.
SmallVector<ObjCInterfaceDecl*, 8> DefinedNonLazyClasses;
/// DefinedNonLazyCategories - List of defined "non-lazy" categories.
SmallVector<ObjCCategoryDecl *, 8> DefinedNonLazyCategories;
SmallVector<Stmt *, 32> Stmts;
SmallVector<int, 8> ObjCBcLabelNo;
// Remember all the @protocol(<expr>) expressions.
llvm::SmallPtrSet<ObjCProtocolDecl *, 32> ProtocolExprDecls;
llvm::DenseSet<uint64_t> CopyDestroyCache;
// Block expressions.
SmallVector<BlockExpr *, 32> Blocks;
SmallVector<int, 32> InnerDeclRefsCount;
SmallVector<DeclRefExpr *, 32> InnerDeclRefs;
SmallVector<DeclRefExpr *, 32> BlockDeclRefs;
// Block related declarations.
SmallVector<ValueDecl *, 8> BlockByCopyDecls;
llvm::SmallPtrSet<ValueDecl *, 8> BlockByCopyDeclsPtrSet;
SmallVector<ValueDecl *, 8> BlockByRefDecls;
llvm::SmallPtrSet<ValueDecl *, 8> BlockByRefDeclsPtrSet;
llvm::DenseMap<ValueDecl *, unsigned> BlockByRefDeclNo;
llvm::SmallPtrSet<ValueDecl *, 8> ImportedBlockDecls;
llvm::SmallPtrSet<VarDecl *, 8> ImportedLocalExternalDecls;
llvm::DenseMap<BlockExpr *, std::string> RewrittenBlockExprs;
llvm::DenseMap<ObjCInterfaceDecl *,
llvm::SmallPtrSet<ObjCIvarDecl *, 8> > ReferencedIvars;
// ivar bitfield grouping containers
llvm::DenseSet<const ObjCInterfaceDecl *> ObjCInterefaceHasBitfieldGroups;
llvm::DenseMap<const ObjCIvarDecl* , unsigned> IvarGroupNumber;
// This container maps an <class, group number for ivar> tuple to the type
// of the struct where the bitfield belongs.
llvm::DenseMap<std::pair<const ObjCInterfaceDecl*, unsigned>, QualType> GroupRecordType;
SmallVector<FunctionDecl*, 32> FunctionDefinitionsSeen;
  // This maps an original source AST to its rewritten form. This allows
// us to avoid rewriting the same node twice (which is very uncommon).
// This is needed to support some of the exotic property rewriting.
llvm::DenseMap<Stmt *, Stmt *> ReplacedNodes;
// Needed for header files being rewritten
bool IsHeader;
bool SilenceRewriteMacroWarning;
bool GenerateLineInfo;
bool objc_impl_method;
bool DisableReplaceStmt;
class DisableReplaceStmtScope {
RewriteModernObjC &R;
bool SavedValue;
public:
DisableReplaceStmtScope(RewriteModernObjC &R)
: R(R), SavedValue(R.DisableReplaceStmt) {
R.DisableReplaceStmt = true;
}
~DisableReplaceStmtScope() {
R.DisableReplaceStmt = SavedValue;
}
};
void InitializeCommon(ASTContext &context);
public:
llvm::DenseMap<ObjCMethodDecl*, std::string> MethodInternalNames;
// Top Level Driver code.
bool HandleTopLevelDecl(DeclGroupRef D) override {
for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(*I)) {
if (!Class->isThisDeclarationADefinition()) {
RewriteForwardClassDecl(D);
break;
} else {
// Keep track of all interface declarations seen.
ObjCInterfacesSeen.push_back(Class);
break;
}
}
if (ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>(*I)) {
if (!Proto->isThisDeclarationADefinition()) {
RewriteForwardProtocolDecl(D);
break;
}
}
if (FunctionDecl *FDecl = dyn_cast<FunctionDecl>(*I)) {
        // Under the modern ABI, we cannot translate the body of the function
        // until all class extensions and their implementations have been seen.
        // This is because they may introduce new bitfields which must go
        // into their grouping struct.
if (FDecl->isThisDeclarationADefinition() &&
            // Not C functions defined inside an ObjC container.
!FDecl->isTopLevelDeclInObjCContainer()) {
FunctionDefinitionsSeen.push_back(FDecl);
break;
}
}
HandleTopLevelSingleDecl(*I);
}
return true;
}
void HandleTopLevelDeclInObjCContainer(DeclGroupRef D) override {
for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(*I)) {
if (isTopLevelBlockPointerType(TD->getUnderlyingType()))
RewriteBlockPointerDecl(TD);
else if (TD->getUnderlyingType()->isFunctionPointerType())
CheckFunctionPointerDecl(TD->getUnderlyingType(), TD);
else
RewriteObjCQualifiedInterfaceTypes(TD);
}
}
return;
}
void HandleTopLevelSingleDecl(Decl *D);
void HandleDeclInMainFile(Decl *D);
RewriteModernObjC(std::string inFile, raw_ostream *OS,
DiagnosticsEngine &D, const LangOptions &LOpts,
bool silenceMacroWarn, bool LineInfo);
~RewriteModernObjC() override {}
void HandleTranslationUnit(ASTContext &C) override;
void ReplaceStmt(Stmt *Old, Stmt *New) {
ReplaceStmtWithRange(Old, New, Old->getSourceRange());
}
void ReplaceStmtWithRange(Stmt *Old, Stmt *New, SourceRange SrcRange) {
assert(Old != nullptr && New != nullptr && "Expected non-null Stmt's");
Stmt *ReplacingStmt = ReplacedNodes[Old];
if (ReplacingStmt)
return; // We can't rewrite the same node twice.
if (DisableReplaceStmt)
return;
// Measure the old text.
int Size = Rewrite.getRangeSize(SrcRange);
if (Size == -1) {
Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
<< Old->getSourceRange();
return;
}
// Get the new text.
std::string SStr;
llvm::raw_string_ostream S(SStr);
New->printPretty(S, nullptr, PrintingPolicy(LangOpts));
const std::string &Str = S.str();
// If replacement succeeded or warning disabled return with no warning.
if (!Rewrite.ReplaceText(SrcRange.getBegin(), Size, Str)) {
ReplacedNodes[Old] = New;
return;
}
if (SilenceRewriteMacroWarning)
return;
Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
<< Old->getSourceRange();
}
void InsertText(SourceLocation Loc, StringRef Str,
bool InsertAfter = true) {
// If insertion succeeded or warning disabled return with no warning.
if (!Rewrite.InsertText(Loc, Str, InsertAfter) ||
SilenceRewriteMacroWarning)
return;
Diags.Report(Context->getFullLoc(Loc), RewriteFailedDiag);
}
void ReplaceText(SourceLocation Start, unsigned OrigLength,
StringRef Str) {
// If removal succeeded or warning disabled return with no warning.
if (!Rewrite.ReplaceText(Start, OrigLength, Str) ||
SilenceRewriteMacroWarning)
return;
Diags.Report(Context->getFullLoc(Start), RewriteFailedDiag);
}
// Syntactic Rewriting.
void RewriteRecordBody(RecordDecl *RD);
void RewriteInclude();
void RewriteLineDirective(const Decl *D);
void ConvertSourceLocationToLineDirective(SourceLocation Loc,
std::string &LineString);
void RewriteForwardClassDecl(DeclGroupRef D);
void RewriteForwardClassDecl(const SmallVectorImpl<Decl *> &DG);
void RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl,
const std::string &typedefString);
void RewriteImplementations();
void RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
ObjCImplementationDecl *IMD,
ObjCCategoryImplDecl *CID);
void RewriteInterfaceDecl(ObjCInterfaceDecl *Dcl);
void RewriteImplementationDecl(Decl *Dcl);
void RewriteObjCMethodDecl(const ObjCInterfaceDecl *IDecl,
ObjCMethodDecl *MDecl, std::string &ResultStr);
void RewriteTypeIntoString(QualType T, std::string &ResultStr,
const FunctionType *&FPRetType);
void RewriteByRefString(std::string &ResultStr, const std::string &Name,
ValueDecl *VD, bool def=false);
void RewriteCategoryDecl(ObjCCategoryDecl *Dcl);
void RewriteProtocolDecl(ObjCProtocolDecl *Dcl);
void RewriteForwardProtocolDecl(DeclGroupRef D);
void RewriteForwardProtocolDecl(const SmallVectorImpl<Decl *> &DG);
void RewriteMethodDeclaration(ObjCMethodDecl *Method);
void RewriteProperty(ObjCPropertyDecl *prop);
void RewriteFunctionDecl(FunctionDecl *FD);
void RewriteBlockPointerType(std::string& Str, QualType Type);
void RewriteBlockPointerTypeVariable(std::string& Str, ValueDecl *VD);
void RewriteBlockLiteralFunctionDecl(FunctionDecl *FD);
void RewriteObjCQualifiedInterfaceTypes(Decl *Dcl);
void RewriteTypeOfDecl(VarDecl *VD);
void RewriteObjCQualifiedInterfaceTypes(Expr *E);
std::string getIvarAccessString(ObjCIvarDecl *D);
// Expression Rewriting.
Stmt *RewriteFunctionBodyOrGlobalInitializer(Stmt *S);
Stmt *RewriteAtEncode(ObjCEncodeExpr *Exp);
Stmt *RewritePropertyOrImplicitGetter(PseudoObjectExpr *Pseudo);
Stmt *RewritePropertyOrImplicitSetter(PseudoObjectExpr *Pseudo);
Stmt *RewriteAtSelector(ObjCSelectorExpr *Exp);
Stmt *RewriteMessageExpr(ObjCMessageExpr *Exp);
Stmt *RewriteObjCStringLiteral(ObjCStringLiteral *Exp);
Stmt *RewriteObjCBoolLiteralExpr(ObjCBoolLiteralExpr *Exp);
Stmt *RewriteObjCBoxedExpr(ObjCBoxedExpr *Exp);
Stmt *RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp);
Stmt *RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral *Exp);
Stmt *RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp);
Stmt *RewriteObjCTryStmt(ObjCAtTryStmt *S);
Stmt *RewriteObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S);
Stmt *RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S);
Stmt *RewriteObjCThrowStmt(ObjCAtThrowStmt *S);
Stmt *RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
SourceLocation OrigEnd);
Stmt *RewriteBreakStmt(BreakStmt *S);
Stmt *RewriteContinueStmt(ContinueStmt *S);
void RewriteCastExpr(CStyleCastExpr *CE);
void RewriteImplicitCastObjCExpr(CastExpr *IE);
void RewriteLinkageSpec(LinkageSpecDecl *LSD);
// Computes ivar bitfield group no.
unsigned ObjCIvarBitfieldGroupNo(ObjCIvarDecl *IV);
// Names field decl. for ivar bitfield group.
void ObjCIvarBitfieldGroupDecl(ObjCIvarDecl *IV, std::string &Result);
// Names struct type for ivar bitfield group.
void ObjCIvarBitfieldGroupType(ObjCIvarDecl *IV, std::string &Result);
// Names symbol for ivar bitfield group field offset.
void ObjCIvarBitfieldGroupOffset(ObjCIvarDecl *IV, std::string &Result);
// Given an ivar bitfield, it builds (or finds) its group record type.
QualType GetGroupRecordTypeForObjCIvarBitfield(ObjCIvarDecl *IV);
QualType SynthesizeBitfieldGroupStructType(
ObjCIvarDecl *IV,
SmallVectorImpl<ObjCIvarDecl *> &IVars);
// Block rewriting.
void RewriteBlocksInFunctionProtoType(QualType funcType, NamedDecl *D);
// Block specific rewrite rules.
void RewriteBlockPointerDecl(NamedDecl *VD);
void RewriteByRefVar(VarDecl *VD, bool firstDecl, bool lastDecl);
Stmt *RewriteBlockDeclRefExpr(DeclRefExpr *VD);
Stmt *RewriteLocalVariableExternalStorage(DeclRefExpr *DRE);
void RewriteBlockPointerFunctionArgs(FunctionDecl *FD);
void RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl,
std::string &Result);
void RewriteObjCFieldDecl(FieldDecl *fieldDecl, std::string &Result);
bool IsTagDefinedInsideClass(ObjCContainerDecl *IDecl, TagDecl *Tag,
bool &IsNamedDefinition);
void RewriteLocallyDefinedNamedAggregates(FieldDecl *fieldDecl,
std::string &Result);
bool RewriteObjCFieldDeclType(QualType &Type, std::string &Result);
void RewriteIvarOffsetSymbols(ObjCInterfaceDecl *CDecl,
std::string &Result);
void Initialize(ASTContext &context) override;
// Misc. AST transformation routines. Sometimes they end up calling
// rewriting routines on the new ASTs.
CallExpr *SynthesizeCallToFunctionDecl(FunctionDecl *FD,
Expr **args, unsigned nargs,
SourceLocation StartLoc=SourceLocation(),
SourceLocation EndLoc=SourceLocation());
Expr *SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavor,
QualType returnType,
SmallVectorImpl<QualType> &ArgTypes,
SmallVectorImpl<Expr*> &MsgExprs,
ObjCMethodDecl *Method);
Stmt *SynthMessageExpr(ObjCMessageExpr *Exp,
SourceLocation StartLoc=SourceLocation(),
SourceLocation EndLoc=SourceLocation());
void SynthCountByEnumWithState(std::string &buf);
void SynthMsgSendFunctionDecl();
void SynthMsgSendSuperFunctionDecl();
void SynthMsgSendStretFunctionDecl();
void SynthMsgSendFpretFunctionDecl();
void SynthMsgSendSuperStretFunctionDecl();
void SynthGetClassFunctionDecl();
void SynthGetMetaClassFunctionDecl();
void SynthGetSuperClassFunctionDecl();
void SynthSelGetUidFunctionDecl();
void SynthSuperConstructorFunctionDecl();
// Rewriting metadata
template<typename MethodIterator>
void RewriteObjCMethodsMetaData(MethodIterator MethodBegin,
MethodIterator MethodEnd,
bool IsInstanceMethod,
StringRef prefix,
StringRef ClassName,
std::string &Result);
void RewriteObjCProtocolMetaData(ObjCProtocolDecl *Protocol,
std::string &Result);
void RewriteObjCProtocolListMetaData(
const ObjCList<ObjCProtocolDecl> &Prots,
StringRef prefix, StringRef ClassName, std::string &Result);
void RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
std::string &Result);
void RewriteClassSetupInitHook(std::string &Result);
void RewriteMetaDataIntoBuffer(std::string &Result);
void WriteImageInfo(std::string &Result);
void RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *CDecl,
std::string &Result);
void RewriteCategorySetupInitHook(std::string &Result);
// Rewriting ivar
void RewriteIvarOffsetComputation(ObjCIvarDecl *ivar,
std::string &Result);
Stmt *RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV);
std::string SynthesizeByrefCopyDestroyHelper(VarDecl *VD, int flag);
std::string SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
StringRef funcName, std::string Tag);
std::string SynthesizeBlockFunc(BlockExpr *CE, int i,
StringRef funcName, std::string Tag);
std::string SynthesizeBlockImpl(BlockExpr *CE,
std::string Tag, std::string Desc);
std::string SynthesizeBlockDescriptor(std::string DescTag,
std::string ImplTag,
int i, StringRef funcName,
unsigned hasCopy);
Stmt *SynthesizeBlockCall(CallExpr *Exp, const Expr* BlockExp);
void SynthesizeBlockLiterals(SourceLocation FunLocStart,
StringRef FunName);
FunctionDecl *SynthBlockInitFunctionDecl(StringRef name);
Stmt *SynthBlockInitExpr(BlockExpr *Exp,
const SmallVectorImpl<DeclRefExpr *> &InnerBlockDeclRefs);
// Misc. helper routines.
QualType getProtocolType();
void WarnAboutReturnGotoStmts(Stmt *S);
void CheckFunctionPointerDecl(QualType dType, NamedDecl *ND);
void InsertBlockLiteralsWithinFunction(FunctionDecl *FD);
void InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD);
bool IsDeclStmtInForeachHeader(DeclStmt *DS);
void CollectBlockDeclRefInfo(BlockExpr *Exp);
void GetBlockDeclRefExprs(Stmt *S);
void GetInnerBlockDeclRefExprs(Stmt *S,
SmallVectorImpl<DeclRefExpr *> &InnerBlockDeclRefs,
llvm::SmallPtrSetImpl<const DeclContext *> &InnerContexts);
// We avoid calling Type::isBlockPointerType(), since it operates on the
// canonical type. We only care if the top-level type is a closure pointer.
bool isTopLevelBlockPointerType(QualType T) {
return isa<BlockPointerType>(T);
}
/// convertBlockPointerToFunctionPointer - Converts a block-pointer type
/// to a function pointer type and upon success, returns true; false
/// otherwise.
bool convertBlockPointerToFunctionPointer(QualType &T) {
if (isTopLevelBlockPointerType(T)) {
const BlockPointerType *BPT = T->getAs<BlockPointerType>();
T = Context->getPointerType(BPT->getPointeeType());
return true;
}
return false;
}
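  // For example, given T == "int (^)(int)", the helper above rewrites T in
  // place to "int (*)(int)" and returns true; a non-block type such as
  // "int *" is left untouched and the call returns false.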
bool convertObjCTypeToCStyleType(QualType &T);
bool needToScanForQualifiers(QualType T);
QualType getSuperStructType();
QualType getConstantStringStructType();
QualType convertFunctionTypeOfBlocks(const FunctionType *FT);
bool BufferContainsPPDirectives(const char *startBuf, const char *endBuf);
void convertToUnqualifiedObjCType(QualType &T) {
if (T->isObjCQualifiedIdType()) {
bool isConst = T.isConstQualified();
T = isConst ? Context->getObjCIdType().withConst()
: Context->getObjCIdType();
}
else if (T->isObjCQualifiedClassType())
T = Context->getObjCClassType();
else if (T->isObjCObjectPointerType() &&
T->getPointeeType()->isObjCQualifiedInterfaceType()) {
if (const ObjCObjectPointerType * OBJPT =
T->getAsObjCInterfacePointerType()) {
const ObjCInterfaceType *IFaceT = OBJPT->getInterfaceType();
T = QualType(IFaceT, 0);
T = Context->getPointerType(T);
}
}
}
// FIXME: This predicate seems like it would be useful to add to ASTContext.
bool isObjCType(QualType T) {
if (!LangOpts.ObjC1 && !LangOpts.ObjC2)
return false;
QualType OCT = Context->getCanonicalType(T).getUnqualifiedType();
if (OCT == Context->getCanonicalType(Context->getObjCIdType()) ||
OCT == Context->getCanonicalType(Context->getObjCClassType()))
return true;
if (const PointerType *PT = OCT->getAs<PointerType>()) {
if (isa<ObjCInterfaceType>(PT->getPointeeType()) ||
PT->getPointeeType()->isObjCQualifiedIdType())
return true;
}
return false;
}
bool PointerTypeTakesAnyBlockArguments(QualType QT);
bool PointerTypeTakesAnyObjCQualifiedType(QualType QT);
void GetExtentOfArgList(const char *Name, const char *&LParen,
const char *&RParen);
void QuoteDoublequotes(std::string &From, std::string &To) {
for (unsigned i = 0; i < From.length(); i++) {
if (From[i] == '"')
To += "\\\"";
else
To += From[i];
}
}
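  // e.g. QuoteDoublequotes turns
  //   say "hi"
  // into
  //   say \"hi\"
  // so the result can be embedded inside a C string literal.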
QualType getSimpleFunctionType(QualType result,
ArrayRef<QualType> args,
bool variadic = false) {
if (result == Context->getObjCInstanceType())
result = Context->getObjCIdType();
FunctionProtoType::ExtProtoInfo fpi;
fpi.Variadic = variadic;
return Context->getFunctionType(result, args, fpi);
}
// Helper function: create a CStyleCastExpr with trivial type source info.
CStyleCastExpr* NoTypeInfoCStyleCastExpr(ASTContext *Ctx, QualType Ty,
CastKind Kind, Expr *E) {
TypeSourceInfo *TInfo = Ctx->getTrivialTypeSourceInfo(Ty, SourceLocation());
return CStyleCastExpr::Create(*Ctx, Ty, VK_RValue, Kind, E, nullptr,
TInfo, SourceLocation(), SourceLocation());
}
bool ImplementationIsNonLazy(const ObjCImplDecl *OD) const {
IdentifierInfo* II = &Context->Idents.get("load");
Selector LoadSel = Context->Selectors.getSelector(0, &II);
return OD->getClassMethod(LoadSel) != nullptr;
}
StringLiteral *getStringLiteral(StringRef Str) {
QualType StrType = Context->getConstantArrayType(
Context->CharTy, llvm::APInt(32, Str.size() + 1), ArrayType::Normal,
0);
return StringLiteral::Create(*Context, Str, StringLiteral::Ascii,
/*Pascal=*/false, StrType, SourceLocation());
}
};
}
void RewriteModernObjC::RewriteBlocksInFunctionProtoType(QualType funcType,
NamedDecl *D) {
if (const FunctionProtoType *fproto
= dyn_cast<FunctionProtoType>(funcType.IgnoreParens())) {
for (const auto &I : fproto->param_types())
if (isTopLevelBlockPointerType(I)) {
// All the args are checked/rewritten. Don't call twice!
RewriteBlockPointerDecl(D);
break;
}
}
}
void RewriteModernObjC::CheckFunctionPointerDecl(QualType funcType, NamedDecl *ND) {
const PointerType *PT = funcType->getAs<PointerType>();
if (PT && PointerTypeTakesAnyBlockArguments(funcType))
RewriteBlocksInFunctionProtoType(PT->getPointeeType(), ND);
}
static bool IsHeaderFile(const std::string &Filename) {
std::string::size_type DotPos = Filename.rfind('.');
if (DotPos == std::string::npos) {
// no file extension
return false;
}
std::string Ext = std::string(Filename.begin()+DotPos+1, Filename.end());
// C header: .h
// C++ header: .hh or .H;
return Ext == "h" || Ext == "hh" || Ext == "H";
}
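// e.g. IsHeaderFile("Foo.h") and IsHeaderFile("Bar.hh") return true, while
// IsHeaderFile("Baz.hpp") and IsHeaderFile("main.m") return false: only the
// ".h", ".hh", and ".H" extensions are treated as headers here.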
RewriteModernObjC::RewriteModernObjC(std::string inFile, raw_ostream* OS,
DiagnosticsEngine &D, const LangOptions &LOpts,
bool silenceMacroWarn,
bool LineInfo)
: Diags(D), LangOpts(LOpts), InFileName(inFile), OutFile(OS),
SilenceRewriteMacroWarning(silenceMacroWarn), GenerateLineInfo(LineInfo) {
IsHeader = IsHeaderFile(inFile);
RewriteFailedDiag = Diags.getCustomDiagID(DiagnosticsEngine::Warning,
"rewriting sub-expression within a macro (may not be correct)");
  // FIXME: This should be an error. But if the block is never called, it is
  // OK; and making it an error may break the inclusion of some headers.
GlobalBlockRewriteFailedDiag = Diags.getCustomDiagID(DiagnosticsEngine::Warning,
"rewriting block literal declared in global scope is not implemented");
TryFinallyContainsReturnDiag = Diags.getCustomDiagID(
DiagnosticsEngine::Warning,
"rewriter doesn't support user-specified control flow semantics "
"for @try/@finally (code may not execute properly)");
}
std::unique_ptr<ASTConsumer> clang::CreateModernObjCRewriter(
const std::string &InFile, raw_ostream *OS, DiagnosticsEngine &Diags,
const LangOptions &LOpts, bool SilenceRewriteMacroWarning, bool LineInfo) {
return llvm::make_unique<RewriteModernObjC>(
InFile, OS, Diags, LOpts, SilenceRewriteMacroWarning, LineInfo);
}
void RewriteModernObjC::InitializeCommon(ASTContext &context) {
Context = &context;
SM = &Context->getSourceManager();
TUDecl = Context->getTranslationUnitDecl();
MsgSendFunctionDecl = nullptr;
MsgSendSuperFunctionDecl = nullptr;
MsgSendStretFunctionDecl = nullptr;
MsgSendSuperStretFunctionDecl = nullptr;
MsgSendFpretFunctionDecl = nullptr;
GetClassFunctionDecl = nullptr;
GetMetaClassFunctionDecl = nullptr;
GetSuperClassFunctionDecl = nullptr;
SelGetUidFunctionDecl = nullptr;
CFStringFunctionDecl = nullptr;
ConstantStringClassReference = nullptr;
NSStringRecord = nullptr;
CurMethodDef = nullptr;
CurFunctionDef = nullptr;
GlobalVarDecl = nullptr;
GlobalConstructionExp = nullptr;
SuperStructDecl = nullptr;
ProtocolTypeDecl = nullptr;
ConstantStringDecl = nullptr;
BcLabelCount = 0;
SuperConstructorFunctionDecl = nullptr;
NumObjCStringLiterals = 0;
PropParentMap = nullptr;
CurrentBody = nullptr;
DisableReplaceStmt = false;
objc_impl_method = false;
// Get the ID and start/end of the main file.
MainFileID = SM->getMainFileID();
const llvm::MemoryBuffer *MainBuf = SM->getBuffer(MainFileID);
MainFileStart = MainBuf->getBufferStart();
MainFileEnd = MainBuf->getBufferEnd();
Rewrite.setSourceMgr(Context->getSourceManager(), Context->getLangOpts());
}
//===----------------------------------------------------------------------===//
// Top Level Driver Code
//===----------------------------------------------------------------------===//
void RewriteModernObjC::HandleTopLevelSingleDecl(Decl *D) {
if (Diags.hasErrorOccurred())
return;
// Two cases: either the decl could be in the main file, or it could be in a
  // #included file. If the former, rewrite it now. If the latter, check to see
// if we rewrote the #include/#import.
SourceLocation Loc = D->getLocation();
Loc = SM->getExpansionLoc(Loc);
// If this is for a builtin, ignore it.
if (Loc.isInvalid()) return;
  // Look for built-in declarations that we need to refer to during the rewrite.
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
RewriteFunctionDecl(FD);
} else if (VarDecl *FVD = dyn_cast<VarDecl>(D)) {
// declared in <Foundation/NSString.h>
if (FVD->getName() == "_NSConstantStringClassReference") {
ConstantStringClassReference = FVD;
return;
}
} else if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(D)) {
RewriteCategoryDecl(CD);
} else if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D)) {
if (PD->isThisDeclarationADefinition())
RewriteProtocolDecl(PD);
} else if (LinkageSpecDecl *LSD = dyn_cast<LinkageSpecDecl>(D)) {
// FIXME. This will not work in all situations and leaving it out
// is harmless.
// RewriteLinkageSpec(LSD);
// Recurse into linkage specifications
for (DeclContext::decl_iterator DI = LSD->decls_begin(),
DIEnd = LSD->decls_end();
DI != DIEnd; ) {
if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>((*DI))) {
if (!IFace->isThisDeclarationADefinition()) {
SmallVector<Decl *, 8> DG;
SourceLocation StartLoc = IFace->getLocStart();
do {
if (isa<ObjCInterfaceDecl>(*DI) &&
!cast<ObjCInterfaceDecl>(*DI)->isThisDeclarationADefinition() &&
StartLoc == (*DI)->getLocStart())
DG.push_back(*DI);
else
break;
++DI;
} while (DI != DIEnd);
RewriteForwardClassDecl(DG);
continue;
}
else {
// Keep track of all interface declarations seen.
ObjCInterfacesSeen.push_back(IFace);
++DI;
continue;
}
}
if (ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>((*DI))) {
if (!Proto->isThisDeclarationADefinition()) {
SmallVector<Decl *, 8> DG;
SourceLocation StartLoc = Proto->getLocStart();
do {
if (isa<ObjCProtocolDecl>(*DI) &&
!cast<ObjCProtocolDecl>(*DI)->isThisDeclarationADefinition() &&
StartLoc == (*DI)->getLocStart())
DG.push_back(*DI);
else
break;
++DI;
} while (DI != DIEnd);
RewriteForwardProtocolDecl(DG);
continue;
}
}
HandleTopLevelSingleDecl(*DI);
++DI;
}
}
// If we have a decl in the main file, see if we should rewrite it.
if (SM->isWrittenInMainFile(Loc))
return HandleDeclInMainFile(D);
}
//===----------------------------------------------------------------------===//
// Syntactic (non-AST) Rewriting Code
//===----------------------------------------------------------------------===//
void RewriteModernObjC::RewriteInclude() {
SourceLocation LocStart = SM->getLocForStartOfFile(MainFileID);
StringRef MainBuf = SM->getBufferData(MainFileID);
const char *MainBufStart = MainBuf.begin();
const char *MainBufEnd = MainBuf.end();
size_t ImportLen = strlen("import");
// Loop over the whole file, looking for includes.
for (const char *BufPtr = MainBufStart; BufPtr < MainBufEnd; ++BufPtr) {
if (*BufPtr == '#') {
if (++BufPtr == MainBufEnd)
return;
while (*BufPtr == ' ' || *BufPtr == '\t')
if (++BufPtr == MainBufEnd)
return;
if (!strncmp(BufPtr, "import", ImportLen)) {
// replace import with include
SourceLocation ImportLoc =
LocStart.getLocWithOffset(BufPtr-MainBufStart);
ReplaceText(ImportLoc, ImportLen, "include");
BufPtr += ImportLen;
}
}
}
}
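// e.g. a "#import <Foundation/Foundation.h>" line in the main file is
// rewritten in place to "#include <Foundation/Foundation.h>"; existing
// #include lines are left untouched.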
static void WriteInternalIvarName(const ObjCInterfaceDecl *IDecl,
ObjCIvarDecl *IvarDecl, std::string &Result) {
Result += "OBJC_IVAR_$_";
Result += IDecl->getName();
Result += "$";
Result += IvarDecl->getName();
}
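// e.g. for interface "MyClass" and ivar "count" this produces the symbol
// name "OBJC_IVAR_$_MyClass$count".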
std::string
RewriteModernObjC::getIvarAccessString(ObjCIvarDecl *D) {
const ObjCInterfaceDecl *ClassDecl = D->getContainingInterface();
// Build name of symbol holding ivar offset.
std::string IvarOffsetName;
if (D->isBitField())
ObjCIvarBitfieldGroupOffset(D, IvarOffsetName);
else
WriteInternalIvarName(ClassDecl, D, IvarOffsetName);
std::string S = "(*(";
QualType IvarT = D->getType();
if (D->isBitField())
IvarT = GetGroupRecordTypeForObjCIvarBitfield(D);
if (!isa<TypedefType>(IvarT) && IvarT->isRecordType()) {
RecordDecl *RD = IvarT->getAs<RecordType>()->getDecl();
RD = RD->getDefinition();
if (RD && !RD->getDeclName().getAsIdentifierInfo()) {
// decltype(((Foo_IMPL*)0)->bar) *
ObjCContainerDecl *CDecl =
dyn_cast<ObjCContainerDecl>(D->getDeclContext());
      // ivars in class extensions require special treatment.
if (ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(CDecl))
CDecl = CatDecl->getClassInterface();
std::string RecName = CDecl->getName();
RecName += "_IMPL";
RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get(RecName.c_str()));
QualType PtrStructIMPL = Context->getPointerType(Context->getTagDeclType(RD));
unsigned UnsignedIntSize =
static_cast<unsigned>(Context->getTypeSize(Context->UnsignedIntTy));
Expr *Zero = IntegerLiteral::Create(*Context,
llvm::APInt(UnsignedIntSize, 0),
Context->UnsignedIntTy, SourceLocation());
Zero = NoTypeInfoCStyleCastExpr(Context, PtrStructIMPL, CK_BitCast, Zero);
ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
Zero);
FieldDecl *FD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
SourceLocation(),
&Context->Idents.get(D->getNameAsString()),
IvarT, nullptr,
/*BitWidth=*/nullptr, /*Mutable=*/true,
ICIS_NoInit);
MemberExpr *ME = new (Context)
MemberExpr(PE, true, SourceLocation(), FD, SourceLocation(),
FD->getType(), VK_LValue, OK_Ordinary);
IvarT = Context->getDecltypeType(ME, ME->getType());
}
}
convertObjCTypeToCStyleType(IvarT);
QualType castT = Context->getPointerType(IvarT);
std::string TypeString(castT.getAsString(Context->getPrintingPolicy()));
S += TypeString;
S += ")";
// ((char *)self + IVAR_OFFSET_SYMBOL_NAME)
S += "((char *)self + ";
S += IvarOffsetName;
S += "))";
if (D->isBitField()) {
S += ".";
S += D->getNameAsString();
}
ReferencedIvars[const_cast<ObjCInterfaceDecl *>(ClassDecl)].insert(D);
return S;
}
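// Worked example (illustrative): for a non-bitfield ivar "int count" of
// class "MyClass", the returned access string is roughly
//   (*(int *)((char *)self + OBJC_IVAR_$_MyClass$count))
// i.e. the ivar is read through a byte offset from 'self', so the offset
// can be supplied by the runtime/linker rather than a fixed struct layout.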
/// mustSynthesizeSetterGetterMethod - returns true if the setter or getter
/// has not been found in the class implementation; in that case it must be
/// synthesized.
static bool mustSynthesizeSetterGetterMethod(ObjCImplementationDecl *IMP,
ObjCPropertyDecl *PD,
bool getter) {
return getter ? !IMP->getInstanceMethod(PD->getGetterName())
: !IMP->getInstanceMethod(PD->getSetterName());
}
void RewriteModernObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
ObjCImplementationDecl *IMD,
ObjCCategoryImplDecl *CID) {
static bool objcGetPropertyDefined = false;
static bool objcSetPropertyDefined = false;
SourceLocation startGetterSetterLoc;
if (PID->getLocStart().isValid()) {
SourceLocation startLoc = PID->getLocStart();
InsertText(startLoc, "// ");
const char *startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '@') && "bogus @synthesize location");
const char *semiBuf = strchr(startBuf, ';');
assert((*semiBuf == ';') && "@synthesize: can't find ';'");
startGetterSetterLoc = startLoc.getLocWithOffset(semiBuf-startBuf+1);
}
else
startGetterSetterLoc = IMD ? IMD->getLocEnd() : CID->getLocEnd();
if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
return; // FIXME: is this correct?
// Generate the 'getter' function.
ObjCPropertyDecl *PD = PID->getPropertyDecl();
ObjCIvarDecl *OID = PID->getPropertyIvarDecl();
assert(IMD && OID && "Synthesized ivars must be attached to @implementation");
unsigned Attributes = PD->getPropertyAttributes();
if (mustSynthesizeSetterGetterMethod(IMD, PD, true /*getter*/)) {
bool GenGetProperty = !(Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) &&
(Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
ObjCPropertyDecl::OBJC_PR_copy));
std::string Getr;
if (GenGetProperty && !objcGetPropertyDefined) {
objcGetPropertyDefined = true;
// FIXME. Is this attribute correct in all cases?
Getr = "\nextern \"C\" __declspec(dllimport) "
"id objc_getProperty(id, SEL, long, bool);\n";
}
RewriteObjCMethodDecl(OID->getContainingInterface(),
PD->getGetterMethodDecl(), Getr);
Getr += "{ ";
// Synthesize an explicit cast to gain access to the ivar.
// See objc-act.c:objc_synthesize_new_getter() for details.
if (GenGetProperty) {
// return objc_getProperty(self, _cmd, offsetof(ClassDecl, OID), 1)
Getr += "typedef ";
const FunctionType *FPRetType = nullptr;
RewriteTypeIntoString(PD->getGetterMethodDecl()->getReturnType(), Getr,
FPRetType);
Getr += " _TYPE";
if (FPRetType) {
Getr += ")"; // close the precedence "scope" for "*".
// Now, emit the argument types (if any).
if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FPRetType)){
Getr += "(";
for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
if (i) Getr += ", ";
std::string ParamStr =
FT->getParamType(i).getAsString(Context->getPrintingPolicy());
Getr += ParamStr;
}
if (FT->isVariadic()) {
if (FT->getNumParams())
Getr += ", ";
Getr += "...";
}
Getr += ")";
} else
Getr += "()";
}
Getr += ";\n";
Getr += "return (_TYPE)";
Getr += "objc_getProperty(self, _cmd, ";
RewriteIvarOffsetComputation(OID, Getr);
Getr += ", 1)";
}
else
Getr += "return " + getIvarAccessString(OID);
Getr += "; }";
InsertText(startGetterSetterLoc, Getr);
}
if (PD->isReadOnly() ||
!mustSynthesizeSetterGetterMethod(IMD, PD, false /*setter*/))
return;
// Generate the 'setter' function.
std::string Setr;
bool GenSetProperty = Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
ObjCPropertyDecl::OBJC_PR_copy);
if (GenSetProperty && !objcSetPropertyDefined) {
objcSetPropertyDefined = true;
// FIXME. Is this attribute correct in all cases?
Setr = "\nextern \"C\" __declspec(dllimport) "
"void objc_setProperty (id, SEL, long, id, bool, bool);\n";
}
RewriteObjCMethodDecl(OID->getContainingInterface(),
PD->getSetterMethodDecl(), Setr);
Setr += "{ ";
// Synthesize an explicit cast to initialize the ivar.
// See objc-act.c:objc_synthesize_new_setter() for details.
if (GenSetProperty) {
Setr += "objc_setProperty (self, _cmd, ";
RewriteIvarOffsetComputation(OID, Setr);
Setr += ", (id)";
Setr += PD->getName();
Setr += ", ";
if (Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic)
Setr += "0, ";
else
Setr += "1, ";
if (Attributes & ObjCPropertyDecl::OBJC_PR_copy)
Setr += "1)";
else
Setr += "0)";
}
else {
Setr += getIvarAccessString(OID) + " = ";
Setr += PD->getName();
}
Setr += "; }\n";
InsertText(startGetterSetterLoc, Setr);
}
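// Illustrative result: for a retained/copied atomic property the getter is
// synthesized roughly as
//   { typedef <ret> _TYPE; return (_TYPE)objc_getProperty(self, _cmd,
//     <ivar offset>, 1); }
// while a plain nonatomic scalar getter simply returns the expression
// produced by getIvarAccessString().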
static void RewriteOneForwardClassDecl(ObjCInterfaceDecl *ForwardDecl,
std::string &typedefString) {
typedefString += "\n#ifndef _REWRITER_typedef_";
typedefString += ForwardDecl->getNameAsString();
typedefString += "\n";
typedefString += "#define _REWRITER_typedef_";
typedefString += ForwardDecl->getNameAsString();
typedefString += "\n";
typedefString += "typedef struct objc_object ";
typedefString += ForwardDecl->getNameAsString();
// typedef struct { } _objc_exc_Classname;
typedefString += ";\ntypedef struct {} _objc_exc_";
typedefString += ForwardDecl->getNameAsString();
typedefString += ";\n#endif\n";
}
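// For a forward declaration "@class Foo;", the text generated above is:
//   #ifndef _REWRITER_typedef_Foo
//   #define _REWRITER_typedef_Foo
//   typedef struct objc_object Foo;
//   typedef struct {} _objc_exc_Foo;
//   #endif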
void RewriteModernObjC::RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl,
const std::string &typedefString) {
SourceLocation startLoc = ClassDecl->getLocStart();
const char *startBuf = SM->getCharacterData(startLoc);
const char *semiPtr = strchr(startBuf, ';');
// Replace the @class with typedefs corresponding to the classes.
ReplaceText(startLoc, semiPtr-startBuf+1, typedefString);
}
void RewriteModernObjC::RewriteForwardClassDecl(DeclGroupRef D) {
std::string typedefString;
for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
if (ObjCInterfaceDecl *ForwardDecl = dyn_cast<ObjCInterfaceDecl>(*I)) {
if (I == D.begin()) {
        // Translate to typedefs that forward-reference structs with the same name
// as the class. As a convenience, we include the original declaration
// as a comment.
typedefString += "// @class ";
typedefString += ForwardDecl->getNameAsString();
typedefString += ";";
}
RewriteOneForwardClassDecl(ForwardDecl, typedefString);
}
else
HandleTopLevelSingleDecl(*I);
}
DeclGroupRef::iterator I = D.begin();
RewriteForwardClassEpilogue(cast<ObjCInterfaceDecl>(*I), typedefString);
}
void RewriteModernObjC::RewriteForwardClassDecl(
const SmallVectorImpl<Decl *> &D) {
std::string typedefString;
for (unsigned i = 0; i < D.size(); i++) {
ObjCInterfaceDecl *ForwardDecl = cast<ObjCInterfaceDecl>(D[i]);
if (i == 0) {
typedefString += "// @class ";
typedefString += ForwardDecl->getNameAsString();
typedefString += ";";
}
RewriteOneForwardClassDecl(ForwardDecl, typedefString);
}
RewriteForwardClassEpilogue(cast<ObjCInterfaceDecl>(D[0]), typedefString);
}
void RewriteModernObjC::RewriteMethodDeclaration(ObjCMethodDecl *Method) {
  // When the method is a synthesized one, such as a getter/setter, there is
  // nothing to rewrite.
if (Method->isImplicit())
return;
SourceLocation LocStart = Method->getLocStart();
SourceLocation LocEnd = Method->getLocEnd();
if (SM->getExpansionLineNumber(LocEnd) >
SM->getExpansionLineNumber(LocStart)) {
InsertText(LocStart, "#if 0\n");
ReplaceText(LocEnd, 1, ";\n#endif\n");
} else {
InsertText(LocStart, "// ");
}
}
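// Hence a single-line declaration such as "- (void)foo;" simply becomes
// "// - (void)foo;", while a declaration spanning multiple lines is wrapped
// in an "#if 0" ... "#endif" block so every line of it is disabled.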
void RewriteModernObjC::RewriteProperty(ObjCPropertyDecl *prop) {
SourceLocation Loc = prop->getAtLoc();
ReplaceText(Loc, 0, "// ");
// FIXME: handle properties that are declared across multiple lines.
}
void RewriteModernObjC::RewriteCategoryDecl(ObjCCategoryDecl *CatDecl) {
SourceLocation LocStart = CatDecl->getLocStart();
// FIXME: handle category headers that are declared across multiple lines.
if (CatDecl->getIvarRBraceLoc().isValid()) {
ReplaceText(LocStart, 1, "/** ");
ReplaceText(CatDecl->getIvarRBraceLoc(), 1, "**/ ");
}
else {
ReplaceText(LocStart, 0, "// ");
}
for (auto *I : CatDecl->properties())
RewriteProperty(I);
for (auto *I : CatDecl->instance_methods())
RewriteMethodDeclaration(I);
for (auto *I : CatDecl->class_methods())
RewriteMethodDeclaration(I);
// Lastly, comment out the @end.
ReplaceText(CatDecl->getAtEndRange().getBegin(),
strlen("@end"), "/* @end */\n");
}
void RewriteModernObjC::RewriteProtocolDecl(ObjCProtocolDecl *PDecl) {
SourceLocation LocStart = PDecl->getLocStart();
assert(PDecl->isThisDeclarationADefinition());
// FIXME: handle protocol headers that are declared across multiple lines.
ReplaceText(LocStart, 0, "// ");
for (auto *I : PDecl->instance_methods())
RewriteMethodDeclaration(I);
for (auto *I : PDecl->class_methods())
RewriteMethodDeclaration(I);
for (auto *I : PDecl->properties())
RewriteProperty(I);
// Lastly, comment out the @end.
SourceLocation LocEnd = PDecl->getAtEndRange().getBegin();
ReplaceText(LocEnd, strlen("@end"), "/* @end */\n");
// Must comment out @optional/@required
const char *startBuf = SM->getCharacterData(LocStart);
const char *endBuf = SM->getCharacterData(LocEnd);
for (const char *p = startBuf; p < endBuf; p++) {
if (*p == '@' && !strncmp(p+1, "optional", strlen("optional"))) {
SourceLocation OptionalLoc = LocStart.getLocWithOffset(p-startBuf);
ReplaceText(OptionalLoc, strlen("@optional"), "/* @optional */");
}
else if (*p == '@' && !strncmp(p+1, "required", strlen("required"))) {
SourceLocation OptionalLoc = LocStart.getLocWithOffset(p-startBuf);
ReplaceText(OptionalLoc, strlen("@required"), "/* @required */");
}
}
}
void RewriteModernObjC::RewriteForwardProtocolDecl(DeclGroupRef D) {
SourceLocation LocStart = (*D.begin())->getLocStart();
if (LocStart.isInvalid())
llvm_unreachable("Invalid SourceLocation");
  // FIXME: handle forward protocols that are declared across multiple lines.
ReplaceText(LocStart, 0, "// ");
}
void
RewriteModernObjC::RewriteForwardProtocolDecl(const SmallVectorImpl<Decl *> &DG) {
SourceLocation LocStart = DG[0]->getLocStart();
if (LocStart.isInvalid())
llvm_unreachable("Invalid SourceLocation");
  // FIXME: handle forward protocols that are declared across multiple lines.
ReplaceText(LocStart, 0, "// ");
}
void
RewriteModernObjC::RewriteLinkageSpec(LinkageSpecDecl *LSD) {
SourceLocation LocStart = LSD->getExternLoc();
if (LocStart.isInvalid())
llvm_unreachable("Invalid extern SourceLocation");
ReplaceText(LocStart, 0, "// ");
if (!LSD->hasBraces())
return;
  // FIXME: We don't rewrite well if '{' is not on the same line as 'extern'.
SourceLocation LocRBrace = LSD->getRBraceLoc();
if (LocRBrace.isInvalid())
llvm_unreachable("Invalid rbrace SourceLocation");
ReplaceText(LocRBrace, 0, "// ");
}
void RewriteModernObjC::RewriteTypeIntoString(QualType T, std::string &ResultStr,
const FunctionType *&FPRetType) {
if (T->isObjCQualifiedIdType())
ResultStr += "id";
else if (T->isFunctionPointerType() ||
T->isBlockPointerType()) {
// needs special handling, since pointer-to-functions have special
    // syntax (where a declaration models use).
QualType retType = T;
QualType PointeeTy;
if (const PointerType* PT = retType->getAs<PointerType>())
PointeeTy = PT->getPointeeType();
else if (const BlockPointerType *BPT = retType->getAs<BlockPointerType>())
PointeeTy = BPT->getPointeeType();
if ((FPRetType = PointeeTy->getAs<FunctionType>())) {
ResultStr +=
FPRetType->getReturnType().getAsString(Context->getPrintingPolicy());
ResultStr += "(*";
}
} else
ResultStr += T.getAsString(Context->getPrintingPolicy());
}
void RewriteModernObjC::RewriteObjCMethodDecl(const ObjCInterfaceDecl *IDecl,
ObjCMethodDecl *OMD,
std::string &ResultStr) {
//fprintf(stderr,"In RewriteObjCMethodDecl\n");
const FunctionType *FPRetType = nullptr;
ResultStr += "\nstatic ";
RewriteTypeIntoString(OMD->getReturnType(), ResultStr, FPRetType);
ResultStr += " ";
// Unique method name
std::string NameStr;
if (OMD->isInstanceMethod())
NameStr += "_I_";
else
NameStr += "_C_";
NameStr += IDecl->getNameAsString();
NameStr += "_";
if (ObjCCategoryImplDecl *CID =
dyn_cast<ObjCCategoryImplDecl>(OMD->getDeclContext())) {
NameStr += CID->getNameAsString();
NameStr += "_";
}
// Append selector names, replacing ':' with '_'
{
std::string selString = OMD->getSelector().getAsString();
int len = selString.size();
for (int i = 0; i < len; i++)
if (selString[i] == ':')
selString[i] = '_';
NameStr += selString;
}
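  // e.g. the instance method "-[MyClass setValue:forKey:]" ends up named
  // "_I_MyClass_setValue_forKey_" (class methods use the "_C_" prefix, and
  // category methods also embed the category name).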
// Remember this name for metadata emission
MethodInternalNames[OMD] = NameStr;
ResultStr += NameStr;
// Rewrite arguments
ResultStr += "(";
// invisible arguments
if (OMD->isInstanceMethod()) {
QualType selfTy = Context->getObjCInterfaceType(IDecl);
selfTy = Context->getPointerType(selfTy);
if (!LangOpts.MicrosoftExt) {
if (ObjCSynthesizedStructs.count(const_cast<ObjCInterfaceDecl*>(IDecl)))
ResultStr += "struct ";
}
// When rewriting for Microsoft, explicitly omit the structure name.
ResultStr += IDecl->getNameAsString();
ResultStr += " *";
}
else
ResultStr += Context->getObjCClassType().getAsString(
Context->getPrintingPolicy());
ResultStr += " self, ";
ResultStr += Context->getObjCSelType().getAsString(Context->getPrintingPolicy());
ResultStr += " _cmd";
// Method arguments.
for (const auto *PDecl : OMD->params()) {
ResultStr += ", ";
if (PDecl->getType()->isObjCQualifiedIdType()) {
ResultStr += "id ";
ResultStr += PDecl->getNameAsString();
} else {
std::string Name = PDecl->getNameAsString();
QualType QT = PDecl->getType();
// Make sure we convert "t (^)(...)" to "t (*)(...)".
(void)convertBlockPointerToFunctionPointer(QT);
QT.getAsStringInternal(Name, Context->getPrintingPolicy());
ResultStr += Name;
}
}
if (OMD->isVariadic())
ResultStr += ", ...";
ResultStr += ") ";
if (FPRetType) {
ResultStr += ")"; // close the precedence "scope" for "*".
// Now, emit the argument types (if any).
if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FPRetType)) {
ResultStr += "(";
for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
if (i) ResultStr += ", ";
std::string ParamStr =
FT->getParamType(i).getAsString(Context->getPrintingPolicy());
ResultStr += ParamStr;
}
if (FT->isVariadic()) {
if (FT->getNumParams())
ResultStr += ", ";
ResultStr += "...";
}
ResultStr += ")";
} else {
ResultStr += "()";
}
}
}
void RewriteModernObjC::RewriteImplementationDecl(Decl *OID) {
ObjCImplementationDecl *IMD = dyn_cast<ObjCImplementationDecl>(OID);
ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(OID);
if (IMD) {
if (IMD->getIvarRBraceLoc().isValid()) {
ReplaceText(IMD->getLocStart(), 1, "/** ");
ReplaceText(IMD->getIvarRBraceLoc(), 1, "**/ ");
}
else {
InsertText(IMD->getLocStart(), "// ");
}
}
else
InsertText(CID->getLocStart(), "// ");
for (auto *OMD : IMD ? IMD->instance_methods() : CID->instance_methods()) {
std::string ResultStr;
RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr);
SourceLocation LocStart = OMD->getLocStart();
SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart();
const char *startBuf = SM->getCharacterData(LocStart);
const char *endBuf = SM->getCharacterData(LocEnd);
ReplaceText(LocStart, endBuf-startBuf, ResultStr);
}
for (auto *OMD : IMD ? IMD->class_methods() : CID->class_methods()) {
std::string ResultStr;
RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr);
SourceLocation LocStart = OMD->getLocStart();
SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart();
const char *startBuf = SM->getCharacterData(LocStart);
const char *endBuf = SM->getCharacterData(LocEnd);
ReplaceText(LocStart, endBuf-startBuf, ResultStr);
}
for (auto *I : IMD ? IMD->property_impls() : CID->property_impls())
RewritePropertyImplDecl(I, IMD, CID);
InsertText(IMD ? IMD->getLocEnd() : CID->getLocEnd(), "// ");
}
void RewriteModernObjC::RewriteInterfaceDecl(ObjCInterfaceDecl *ClassDecl) {
// Do not synthesize more than once.
if (ObjCSynthesizedStructs.count(ClassDecl))
return;
  // Make sure superclasses are written before the current class is written.
ObjCInterfaceDecl *SuperClass = ClassDecl->getSuperClass();
while (SuperClass) {
RewriteInterfaceDecl(SuperClass);
SuperClass = SuperClass->getSuperClass();
}
std::string ResultStr;
if (!ObjCWrittenInterfaces.count(ClassDecl->getCanonicalDecl())) {
// we haven't seen a forward decl - generate a typedef.
RewriteOneForwardClassDecl(ClassDecl, ResultStr);
RewriteIvarOffsetSymbols(ClassDecl, ResultStr);
RewriteObjCInternalStruct(ClassDecl, ResultStr);
// Mark this typedef as having been written into its c++ equivalent.
ObjCWrittenInterfaces.insert(ClassDecl->getCanonicalDecl());
for (auto *I : ClassDecl->properties())
RewriteProperty(I);
for (auto *I : ClassDecl->instance_methods())
RewriteMethodDeclaration(I);
for (auto *I : ClassDecl->class_methods())
RewriteMethodDeclaration(I);
// Lastly, comment out the @end.
ReplaceText(ClassDecl->getAtEndRange().getBegin(), strlen("@end"),
"/* @end */\n");
}
}
Stmt *RewriteModernObjC::RewritePropertyOrImplicitSetter(PseudoObjectExpr *PseudoOp) {
SourceRange OldRange = PseudoOp->getSourceRange();
// We just magically know some things about the structure of this
// expression.
ObjCMessageExpr *OldMsg =
cast<ObjCMessageExpr>(PseudoOp->getSemanticExpr(
PseudoOp->getNumSemanticExprs() - 1));
// Because the rewriter doesn't allow us to rewrite rewritten code,
// we need to suppress rewriting the sub-statements.
Expr *Base;
SmallVector<Expr*, 2> Args;
{
DisableReplaceStmtScope S(*this);
// Rebuild the base expression if we have one.
Base = nullptr;
if (OldMsg->getReceiverKind() == ObjCMessageExpr::Instance) {
Base = OldMsg->getInstanceReceiver();
Base = cast<OpaqueValueExpr>(Base)->getSourceExpr();
Base = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(Base));
}
unsigned numArgs = OldMsg->getNumArgs();
for (unsigned i = 0; i < numArgs; i++) {
Expr *Arg = OldMsg->getArg(i);
if (isa<OpaqueValueExpr>(Arg))
Arg = cast<OpaqueValueExpr>(Arg)->getSourceExpr();
Arg = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(Arg));
Args.push_back(Arg);
}
}
// TODO: avoid this copy.
SmallVector<SourceLocation, 1> SelLocs;
OldMsg->getSelectorLocs(SelLocs);
ObjCMessageExpr *NewMsg = nullptr;
switch (OldMsg->getReceiverKind()) {
case ObjCMessageExpr::Class:
NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
OldMsg->getValueKind(),
OldMsg->getLeftLoc(),
OldMsg->getClassReceiverTypeInfo(),
OldMsg->getSelector(),
SelLocs,
OldMsg->getMethodDecl(),
Args,
OldMsg->getRightLoc(),
OldMsg->isImplicit());
break;
case ObjCMessageExpr::Instance:
NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
OldMsg->getValueKind(),
OldMsg->getLeftLoc(),
Base,
OldMsg->getSelector(),
SelLocs,
OldMsg->getMethodDecl(),
Args,
OldMsg->getRightLoc(),
OldMsg->isImplicit());
break;
case ObjCMessageExpr::SuperClass:
case ObjCMessageExpr::SuperInstance:
NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
OldMsg->getValueKind(),
OldMsg->getLeftLoc(),
OldMsg->getSuperLoc(),
OldMsg->getReceiverKind() == ObjCMessageExpr::SuperInstance,
OldMsg->getSuperType(),
OldMsg->getSelector(),
SelLocs,
OldMsg->getMethodDecl(),
Args,
OldMsg->getRightLoc(),
OldMsg->isImplicit());
break;
}
Stmt *Replacement = SynthMessageExpr(NewMsg);
ReplaceStmtWithRange(PseudoOp, Replacement, OldRange);
return Replacement;
}
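/// RewritePropertyOrImplicitGetter - Rewrite a property access or implicit
/// getter pseudo-object expression by rebuilding the underlying getter
/// message send and synthesizing the equivalent objc_msgSend call.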
Stmt *RewriteModernObjC::RewritePropertyOrImplicitGetter(PseudoObjectExpr *PseudoOp) {
SourceRange OldRange = PseudoOp->getSourceRange();
// We just magically know some things about the structure of this
// expression.
ObjCMessageExpr *OldMsg =
cast<ObjCMessageExpr>(PseudoOp->getResultExpr()->IgnoreImplicit());
// Because the rewriter doesn't allow us to rewrite rewritten code,
// we need to suppress rewriting the sub-statements.
Expr *Base = nullptr;
SmallVector<Expr*, 1> Args;
{
DisableReplaceStmtScope S(*this);
// Rebuild the base expression if we have one.
if (OldMsg->getReceiverKind() == ObjCMessageExpr::Instance) {
Base = OldMsg->getInstanceReceiver();
Base = cast<OpaqueValueExpr>(Base)->getSourceExpr();
Base = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(Base));
}
unsigned numArgs = OldMsg->getNumArgs();
for (unsigned i = 0; i < numArgs; i++) {
Expr *Arg = OldMsg->getArg(i);
if (isa<OpaqueValueExpr>(Arg))
Arg = cast<OpaqueValueExpr>(Arg)->getSourceExpr();
Arg = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(Arg));
Args.push_back(Arg);
}
}
// Intentionally empty.
SmallVector<SourceLocation, 1> SelLocs;
ObjCMessageExpr *NewMsg = nullptr;
switch (OldMsg->getReceiverKind()) {
case ObjCMessageExpr::Class:
NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
OldMsg->getValueKind(),
OldMsg->getLeftLoc(),
OldMsg->getClassReceiverTypeInfo(),
OldMsg->getSelector(),
SelLocs,
OldMsg->getMethodDecl(),
Args,
OldMsg->getRightLoc(),
OldMsg->isImplicit());
break;
case ObjCMessageExpr::Instance:
NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
OldMsg->getValueKind(),
OldMsg->getLeftLoc(),
Base,
OldMsg->getSelector(),
SelLocs,
OldMsg->getMethodDecl(),
Args,
OldMsg->getRightLoc(),
OldMsg->isImplicit());
break;
case ObjCMessageExpr::SuperClass:
case ObjCMessageExpr::SuperInstance:
NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
OldMsg->getValueKind(),
OldMsg->getLeftLoc(),
OldMsg->getSuperLoc(),
OldMsg->getReceiverKind() == ObjCMessageExpr::SuperInstance,
OldMsg->getSuperType(),
OldMsg->getSelector(),
SelLocs,
OldMsg->getMethodDecl(),
Args,
OldMsg->getRightLoc(),
OldMsg->isImplicit());
break;
}
Stmt *Replacement = SynthMessageExpr(NewMsg);
ReplaceStmtWithRange(PseudoOp, Replacement, OldRange);
return Replacement;
}
/// SynthCountByEnumWithState - To print:
/// ((NSUInteger (*)
/// (id, SEL, struct __objcFastEnumerationState *, id *, NSUInteger))
/// (void *)objc_msgSend)((id)l_collection,
/// sel_registerName(
/// "countByEnumeratingWithState:objects:count:"),
/// &enumState,
/// (id *)__rw_items, (NSUInteger)16)
///
void RewriteModernObjC::SynthCountByEnumWithState(std::string &buf) {
buf += "((_WIN_NSUInteger (*) (id, SEL, struct __objcFastEnumerationState *, "
"id *, _WIN_NSUInteger))(void *)objc_msgSend)";
buf += "\n\t\t";
buf += "((id)l_collection,\n\t\t";
buf += "sel_registerName(\"countByEnumeratingWithState:objects:count:\"),";
buf += "\n\t\t";
buf += "&enumState, "
"(id *)__rw_items, (_WIN_NSUInteger)16)";
}
/// RewriteBreakStmt - Rewrite a break-stmt inside an ObjC2 foreach
/// statement to exit to its outer synthesized loop.
///
Stmt *RewriteModernObjC::RewriteBreakStmt(BreakStmt *S) {
if (Stmts.empty() || !isa<ObjCForCollectionStmt>(Stmts.back()))
return S;
// replace break with goto __break_label
std::string buf;
SourceLocation startLoc = S->getLocStart();
buf = "goto __break_label_";
buf += utostr(ObjCBcLabelNo.back());
ReplaceText(startLoc, strlen("break"), buf);
return nullptr;
}
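/// ConvertSourceLocationToLineDirective - Append a #line directive for the
/// given source location to LineString, when line info generation is enabled.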
void RewriteModernObjC::ConvertSourceLocationToLineDirective(
SourceLocation Loc,
std::string &LineString) {
if (Loc.isFileID() && GenerateLineInfo) {
LineString += "\n#line ";
PresumedLoc PLoc = SM->getPresumedLoc(Loc);
LineString += utostr(PLoc.getLine());
LineString += " \"";
LineString += Lexer::Stringify(PLoc.getFilename());
LineString += "\"\n";
}
}
/// RewriteContinueStmt - Rewrite a continue-stmt inside an ObjC2 foreach
/// statement to continue with its inner synthesized loop.
///
Stmt *RewriteModernObjC::RewriteContinueStmt(ContinueStmt *S) {
if (Stmts.empty() || !isa<ObjCForCollectionStmt>(Stmts.back()))
return S;
// replace continue with goto __continue_label
std::string buf;
SourceLocation startLoc = S->getLocStart();
buf = "goto __continue_label_";
buf += utostr(ObjCBcLabelNo.back());
ReplaceText(startLoc, strlen("continue"), buf);
return nullptr;
}
/// RewriteObjCForCollectionStmt - Rewriter for ObjC2's foreach statement.
/// It rewrites:
/// for ( type elem in collection) { stmts; }
/// Into:
/// {
/// type elem;
/// struct __objcFastEnumerationState enumState = { 0 };
/// id __rw_items[16];
/// id l_collection = (id)collection;
/// NSUInteger limit = [l_collection countByEnumeratingWithState:&enumState
/// objects:__rw_items count:16];
/// if (limit) {
/// unsigned long startMutations = *enumState.mutationsPtr;
/// do {
/// unsigned long counter = 0;
/// do {
/// if (startMutations != *enumState.mutationsPtr)
/// objc_enumerationMutation(l_collection);
/// elem = (type)enumState.itemsPtr[counter++];
/// stmts;
/// __continue_label: ;
/// } while (counter < limit);
/// } while ((limit = [l_collection countByEnumeratingWithState:&enumState
/// objects:__rw_items count:16]));
/// elem = nil;
/// __break_label: ;
/// }
/// else
/// elem = nil;
/// }
///
Stmt *RewriteModernObjC::RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
SourceLocation OrigEnd) {
assert(!Stmts.empty() && "ObjCForCollectionStmt - Statement stack empty");
assert(isa<ObjCForCollectionStmt>(Stmts.back()) &&
"ObjCForCollectionStmt Statement stack mismatch");
assert(!ObjCBcLabelNo.empty() &&
"ObjCForCollectionStmt - Label No stack empty");
SourceLocation startLoc = S->getLocStart();
const char *startBuf = SM->getCharacterData(startLoc);
StringRef elementName;
std::string elementTypeAsString;
std::string buf;
// line directive first.
SourceLocation ForEachLoc = S->getForLoc();
ConvertSourceLocationToLineDirective(ForEachLoc, buf);
buf += "{\n\t";
if (DeclStmt *DS = dyn_cast<DeclStmt>(S->getElement())) {
// type elem;
NamedDecl* D = cast<NamedDecl>(DS->getSingleDecl());
QualType ElementType = cast<ValueDecl>(D)->getType();
if (ElementType->isObjCQualifiedIdType() ||
ElementType->isObjCQualifiedInterfaceType())
// Simply use 'id' for all qualified types.
elementTypeAsString = "id";
else
elementTypeAsString = ElementType.getAsString(Context->getPrintingPolicy());
buf += elementTypeAsString;
buf += " ";
elementName = D->getName();
buf += elementName;
buf += ";\n\t";
}
else {
DeclRefExpr *DR = cast<DeclRefExpr>(S->getElement());
elementName = DR->getDecl()->getName();
ValueDecl *VD = cast<ValueDecl>(DR->getDecl());
if (VD->getType()->isObjCQualifiedIdType() ||
VD->getType()->isObjCQualifiedInterfaceType())
// Simply use 'id' for all qualified types.
elementTypeAsString = "id";
else
elementTypeAsString = VD->getType().getAsString(Context->getPrintingPolicy());
}
// struct __objcFastEnumerationState enumState = { 0 };
buf += "struct __objcFastEnumerationState enumState = { 0 };\n\t";
// id __rw_items[16];
buf += "id __rw_items[16];\n\t";
// id l_collection = (id)
buf += "id l_collection = (id)";
// Find start location of 'collection' the hard way!
const char *startCollectionBuf = startBuf;
startCollectionBuf += 3; // skip 'for'
startCollectionBuf = strchr(startCollectionBuf, '(');
startCollectionBuf++; // skip '('
// find 'in' and skip it.
while (*startCollectionBuf != ' ' ||
*(startCollectionBuf+1) != 'i' || *(startCollectionBuf+2) != 'n' ||
(*(startCollectionBuf+3) != ' ' &&
*(startCollectionBuf+3) != '[' && *(startCollectionBuf+3) != '('))
startCollectionBuf++;
startCollectionBuf += 3;
// Replace: "for (type element in" with string constructed thus far.
ReplaceText(startLoc, startCollectionBuf - startBuf, buf);
// Replace ')' in for '(' type elem in collection ')' with ';'
SourceLocation rightParenLoc = S->getRParenLoc();
const char *rparenBuf = SM->getCharacterData(rightParenLoc);
  SourceLocation rightParenTextLoc =
      startLoc.getLocWithOffset(rparenBuf-startBuf);
buf = ";\n\t";
// unsigned long limit = [l_collection countByEnumeratingWithState:&enumState
// objects:__rw_items count:16];
// which is synthesized into:
// NSUInteger limit =
// ((NSUInteger (*)
// (id, SEL, struct __objcFastEnumerationState *, id *, NSUInteger))
// (void *)objc_msgSend)((id)l_collection,
// sel_registerName(
// "countByEnumeratingWithState:objects:count:"),
// (struct __objcFastEnumerationState *)&state,
// (id *)__rw_items, (NSUInteger)16);
buf += "_WIN_NSUInteger limit =\n\t\t";
SynthCountByEnumWithState(buf);
buf += ";\n\t";
/// if (limit) {
/// unsigned long startMutations = *enumState.mutationsPtr;
/// do {
/// unsigned long counter = 0;
/// do {
/// if (startMutations != *enumState.mutationsPtr)
/// objc_enumerationMutation(l_collection);
/// elem = (type)enumState.itemsPtr[counter++];
buf += "if (limit) {\n\t";
buf += "unsigned long startMutations = *enumState.mutationsPtr;\n\t";
buf += "do {\n\t\t";
buf += "unsigned long counter = 0;\n\t\t";
buf += "do {\n\t\t\t";
buf += "if (startMutations != *enumState.mutationsPtr)\n\t\t\t\t";
buf += "objc_enumerationMutation(l_collection);\n\t\t\t";
buf += elementName;
buf += " = (";
buf += elementTypeAsString;
buf += ")enumState.itemsPtr[counter++];";
// Replace ')' in for '(' type elem in collection ')' with all of these.
  ReplaceText(rightParenTextLoc, 1, buf);
/// __continue_label: ;
/// } while (counter < limit);
/// } while ((limit = [l_collection countByEnumeratingWithState:&enumState
/// objects:__rw_items count:16]));
/// elem = nil;
/// __break_label: ;
/// }
/// else
/// elem = nil;
/// }
///
buf = ";\n\t";
buf += "__continue_label_";
buf += utostr(ObjCBcLabelNo.back());
buf += ": ;";
buf += "\n\t\t";
buf += "} while (counter < limit);\n\t";
buf += "} while ((limit = ";
SynthCountByEnumWithState(buf);
buf += "));\n\t";
buf += elementName;
buf += " = ((";
buf += elementTypeAsString;
buf += ")0);\n\t";
buf += "__break_label_";
buf += utostr(ObjCBcLabelNo.back());
buf += ": ;\n\t";
buf += "}\n\t";
buf += "else\n\t\t";
buf += elementName;
buf += " = ((";
buf += elementTypeAsString;
buf += ")0);\n\t";
buf += "}\n";
// Insert all these *after* the statement body.
// FIXME: If this should support Obj-C++, support CXXTryStmt
if (isa<CompoundStmt>(S->getBody())) {
SourceLocation endBodyLoc = OrigEnd.getLocWithOffset(1);
InsertText(endBodyLoc, buf);
} else {
/* Need to treat single statements specially. For example:
*
* for (A *a in b) if (stuff()) break;
* for (A *a in b) xxxyy;
*
* The following code simply scans ahead to the semi to find the actual end.
*/
const char *stmtBuf = SM->getCharacterData(OrigEnd);
const char *semiBuf = strchr(stmtBuf, ';');
assert(semiBuf && "Can't find ';'");
SourceLocation endBodyLoc = OrigEnd.getLocWithOffset(semiBuf-stmtBuf+1);
InsertText(endBodyLoc, buf);
}
Stmts.pop_back();
ObjCBcLabelNo.pop_back();
return nullptr;
}
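/// Write_RethrowObject - Emit a local RAII struct whose destructor rethrows
/// the captured exception object via objc_exception_throw.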
static void Write_RethrowObject(std::string &buf) {
buf += "{ struct _FIN { _FIN(id reth) : rethrow(reth) {}\n";
buf += "\t~_FIN() { if (rethrow) objc_exception_throw(rethrow); }\n";
buf += "\tid rethrow;\n";
buf += "\t} _fin_force_rethow(_rethrow);";
}
/// RewriteObjCSynchronizedStmt -
/// This routine rewrites @synchronized(expr) stmt;
/// into:
/// objc_sync_enter(expr);
/// @try stmt @finally { objc_sync_exit(expr); }
///
Stmt *RewriteModernObjC::RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
// Get the start location and compute the semi location.
SourceLocation startLoc = S->getLocStart();
const char *startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '@') && "bogus @synchronized location");
std::string buf;
SourceLocation SynchLoc = S->getAtSynchronizedLoc();
ConvertSourceLocationToLineDirective(SynchLoc, buf);
buf += "{ id _rethrow = 0; id _sync_obj = (id)";
const char *lparenBuf = startBuf;
while (*lparenBuf != '(') lparenBuf++;
ReplaceText(startLoc, lparenBuf-startBuf+1, buf);
buf = "; objc_sync_enter(_sync_obj);\n";
buf += "try {\n\tstruct _SYNC_EXIT { _SYNC_EXIT(id arg) : sync_exit(arg) {}";
buf += "\n\t~_SYNC_EXIT() {objc_sync_exit(sync_exit);}";
buf += "\n\tid sync_exit;";
buf += "\n\t} _sync_exit(_sync_obj);\n";
// We can't use S->getSynchExpr()->getLocEnd() to find the end location, since
// the sync expression is typically a message expression that's already
  // been rewritten! (which implies the SourceLocations are invalid).
SourceLocation RParenExprLoc = S->getSynchBody()->getLocStart();
const char *RParenExprLocBuf = SM->getCharacterData(RParenExprLoc);
while (*RParenExprLocBuf != ')') RParenExprLocBuf--;
RParenExprLoc = startLoc.getLocWithOffset(RParenExprLocBuf-startBuf);
  SourceLocation LBraceLoc = S->getSynchBody()->getLocStart();
  const char *LBraceLocBuf = SM->getCharacterData(LBraceLoc);
assert (*LBraceLocBuf == '{');
ReplaceText(RParenExprLoc, (LBraceLocBuf - SM->getCharacterData(RParenExprLoc) + 1), buf);
SourceLocation startRBraceLoc = S->getSynchBody()->getLocEnd();
assert((*SM->getCharacterData(startRBraceLoc) == '}') &&
"bogus @synchronized block");
buf = "} catch (id e) {_rethrow = e;}\n";
Write_RethrowObject(buf);
buf += "}\n";
buf += "}\n";
ReplaceText(startRBraceLoc, 1, buf);
return nullptr;
}
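/// WarnAboutReturnGotoStmts - Walk a statement recursively and diagnose any
/// return or goto statements, which are not supported inside a rewritten
/// @try/@finally body.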
void RewriteModernObjC::WarnAboutReturnGotoStmts(Stmt *S)
{
// Perform a bottom up traversal of all children.
for (Stmt *SubStmt : S->children())
if (SubStmt)
WarnAboutReturnGotoStmts(SubStmt);
if (isa<ReturnStmt>(S) || isa<GotoStmt>(S)) {
Diags.Report(Context->getFullLoc(S->getLocStart()),
TryFinallyContainsReturnDiag);
}
return;
}
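/// RewriteObjCAutoreleasePoolStmt - Rewrite @autoreleasepool { ... } into a
/// compound statement that declares a local __AtAutoreleasePool object.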
Stmt *RewriteModernObjC::RewriteObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S) {
SourceLocation startLoc = S->getAtLoc();
ReplaceText(startLoc, strlen("@autoreleasepool"), "/* @autoreleasepool */");
ReplaceText(S->getSubStmt()->getLocStart(), 1,
"{ __AtAutoreleasePool __autoreleasepool; ");
return nullptr;
}
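/// RewriteObjCTryStmt - Rewrite @try/@catch/@finally into C++ try/catch,
/// translating typed @catch parameters and synthesizing the rethrow logic
/// needed for @finally.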
Stmt *RewriteModernObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
ObjCAtFinallyStmt *finalStmt = S->getFinallyStmt();
bool noCatch = S->getNumCatchStmts() == 0;
std::string buf;
SourceLocation TryLocation = S->getAtTryLoc();
ConvertSourceLocationToLineDirective(TryLocation, buf);
if (finalStmt) {
if (noCatch)
buf += "{ id volatile _rethrow = 0;\n";
else {
buf += "{ id volatile _rethrow = 0;\ntry {\n";
}
}
// Get the start location and compute the semi location.
SourceLocation startLoc = S->getLocStart();
const char *startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '@') && "bogus @try location");
if (finalStmt)
ReplaceText(startLoc, 1, buf);
else
// @try -> try
ReplaceText(startLoc, 1, "");
for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I) {
ObjCAtCatchStmt *Catch = S->getCatchStmt(I);
VarDecl *catchDecl = Catch->getCatchParamDecl();
startLoc = Catch->getLocStart();
bool AtRemoved = false;
if (catchDecl) {
QualType t = catchDecl->getType();
if (const ObjCObjectPointerType *Ptr = t->getAs<ObjCObjectPointerType>()) {
// Should be a pointer to a class.
ObjCInterfaceDecl *IDecl = Ptr->getObjectType()->getInterface();
if (IDecl) {
std::string Result;
ConvertSourceLocationToLineDirective(Catch->getLocStart(), Result);
startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '@') && "bogus @catch location");
SourceLocation rParenLoc = Catch->getRParenLoc();
const char *rParenBuf = SM->getCharacterData(rParenLoc);
// _objc_exc_Foo *_e as argument to catch.
Result += "catch (_objc_exc_"; Result += IDecl->getNameAsString();
Result += " *_"; Result += catchDecl->getNameAsString();
Result += ")";
ReplaceText(startLoc, rParenBuf-startBuf+1, Result);
// Foo *e = (Foo *)_e;
Result.clear();
Result = "{ ";
Result += IDecl->getNameAsString();
Result += " *"; Result += catchDecl->getNameAsString();
Result += " = ("; Result += IDecl->getNameAsString(); Result += "*)";
Result += "_"; Result += catchDecl->getNameAsString();
Result += "; ";
SourceLocation lBraceLoc = Catch->getCatchBody()->getLocStart();
ReplaceText(lBraceLoc, 1, Result);
AtRemoved = true;
}
}
}
if (!AtRemoved)
// @catch -> catch
ReplaceText(startLoc, 1, "");
}
if (finalStmt) {
buf.clear();
SourceLocation FinallyLoc = finalStmt->getLocStart();
if (noCatch) {
ConvertSourceLocationToLineDirective(FinallyLoc, buf);
buf += "catch (id e) {_rethrow = e;}\n";
}
else {
buf += "}\n";
ConvertSourceLocationToLineDirective(FinallyLoc, buf);
buf += "catch (id e) {_rethrow = e;}\n";
}
SourceLocation startFinalLoc = finalStmt->getLocStart();
ReplaceText(startFinalLoc, 8, buf);
Stmt *body = finalStmt->getFinallyBody();
SourceLocation startFinalBodyLoc = body->getLocStart();
buf.clear();
Write_RethrowObject(buf);
ReplaceText(startFinalBodyLoc, 1, buf);
SourceLocation endFinalBodyLoc = body->getLocEnd();
ReplaceText(endFinalBodyLoc, 1, "}\n}");
    // Now check for any return/goto statements within the @try.
WarnAboutReturnGotoStmts(S->getTryBody());
}
return nullptr;
}
// This can't be done with ReplaceStmt(S, ThrowExpr), since
// the throw expression is typically a message expression that's already
// been rewritten! (which implies the SourceLocations are invalid).
Stmt *RewriteModernObjC::RewriteObjCThrowStmt(ObjCAtThrowStmt *S) {
// Get the start location and compute the semi location.
SourceLocation startLoc = S->getLocStart();
const char *startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '@') && "bogus @throw location");
std::string buf;
/* void objc_exception_throw(id) __attribute__((noreturn)); */
if (S->getThrowExpr())
buf = "objc_exception_throw(";
else
buf = "throw";
// handle "@ throw" correctly.
const char *wBuf = strchr(startBuf, 'w');
assert((*wBuf == 'w') && "@throw: can't find 'w'");
ReplaceText(startLoc, wBuf-startBuf+1, buf);
SourceLocation endLoc = S->getLocEnd();
const char *endBuf = SM->getCharacterData(endLoc);
const char *semiBuf = strchr(endBuf, ';');
assert((*semiBuf == ';') && "@throw: can't find ';'");
SourceLocation semiLoc = startLoc.getLocWithOffset(semiBuf-startBuf);
if (S->getThrowExpr())
ReplaceText(semiLoc, 1, ");");
return nullptr;
}
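/// RewriteAtEncode - Rewrite @encode(type) into the equivalent string literal.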
Stmt *RewriteModernObjC::RewriteAtEncode(ObjCEncodeExpr *Exp) {
// Create a new string expression.
std::string StrEncoding;
Context->getObjCEncodingForType(Exp->getEncodedType(), StrEncoding);
Expr *Replacement = getStringLiteral(StrEncoding);
ReplaceStmt(Exp, Replacement);
// Replace this subexpr in the parent.
// delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
return Replacement;
}
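/// RewriteAtSelector - Rewrite @selector(sel) into a call to
/// sel_registerName("sel").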
Stmt *RewriteModernObjC::RewriteAtSelector(ObjCSelectorExpr *Exp) {
if (!SelGetUidFunctionDecl)
SynthSelGetUidFunctionDecl();
assert(SelGetUidFunctionDecl && "Can't find sel_registerName() decl");
// Create a call to sel_registerName("selName").
SmallVector<Expr*, 8> SelExprs;
SelExprs.push_back(getStringLiteral(Exp->getSelector().getAsString()));
CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
&SelExprs[0], SelExprs.size());
ReplaceStmt(Exp, SelExp);
// delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
return SelExp;
}
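/// SynthesizeCallToFunctionDecl - Build the AST for a call to the given
/// function declaration, with the given argument expressions.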
CallExpr *RewriteModernObjC::SynthesizeCallToFunctionDecl(
FunctionDecl *FD, Expr **args, unsigned nargs, SourceLocation StartLoc,
SourceLocation EndLoc) {
// Get the type, we will need to reference it in a couple spots.
QualType msgSendType = FD->getType();
// Create a reference to the objc_msgSend() declaration.
DeclRefExpr *DRE =
new (Context) DeclRefExpr(FD, false, msgSendType, VK_LValue, SourceLocation());
// Now, we cast the reference to a pointer to the objc_msgSend type.
QualType pToFunc = Context->getPointerType(msgSendType);
ImplicitCastExpr *ICE =
ImplicitCastExpr::Create(*Context, pToFunc, CK_FunctionToPointerDecay,
DRE, nullptr, VK_RValue);
const FunctionType *FT = msgSendType->getAs<FunctionType>();
CallExpr *Exp =
new (Context) CallExpr(*Context, ICE, llvm::makeArrayRef(args, nargs),
FT->getCallResultType(*Context),
VK_RValue, EndLoc);
return Exp;
}
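/// scanForProtocolRefs - Scan the buffer [startBuf, endBuf) for a protocol
/// qualifier list such as <p1, p2>, returning its delimiters via
/// startRef/endRef.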
static bool scanForProtocolRefs(const char *startBuf, const char *endBuf,
const char *&startRef, const char *&endRef) {
while (startBuf < endBuf) {
if (*startBuf == '<')
startRef = startBuf; // mark the start.
if (*startBuf == '>') {
if (startRef && *startRef == '<') {
endRef = startBuf; // mark the end.
return true;
}
return false;
}
startBuf++;
}
return false;
}
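/// scanToNextArgument - Advance argRef to the ',' or ')' terminating the
/// current argument, skipping over protocol qualifier angle brackets.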
static void scanToNextArgument(const char *&argRef) {
int angle = 0;
while (*argRef != ')' && (*argRef != ',' || angle > 0)) {
if (*argRef == '<')
angle++;
else if (*argRef == '>')
angle--;
argRef++;
}
assert(angle == 0 && "scanToNextArgument - bad protocol type syntax");
}
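/// needToScanForQualifiers - Return true if the type may carry protocol
/// qualifiers (id<p>, C<p> *, or arrays thereof) that must be commented out
/// in the rewritten source.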
bool RewriteModernObjC::needToScanForQualifiers(QualType T) {
if (T->isObjCQualifiedIdType())
return true;
if (const PointerType *PT = T->getAs<PointerType>()) {
if (PT->getPointeeType()->isObjCQualifiedIdType())
return true;
}
if (T->isObjCObjectPointerType()) {
T = T->getPointeeType();
return T->isObjCQualifiedInterfaceType();
}
if (T->isArrayType()) {
QualType ElemTy = Context->getBaseElementType(T);
return needToScanForQualifiers(ElemTy);
}
return false;
}
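/// RewriteObjCQualifiedInterfaceTypes - Comment out any protocol qualifiers
/// appearing in the source text of the given expression's type.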
void RewriteModernObjC::RewriteObjCQualifiedInterfaceTypes(Expr *E) {
QualType Type = E->getType();
if (needToScanForQualifiers(Type)) {
SourceLocation Loc, EndLoc;
if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E)) {
Loc = ECE->getLParenLoc();
EndLoc = ECE->getRParenLoc();
} else {
Loc = E->getLocStart();
EndLoc = E->getLocEnd();
}
// This will defend against trying to rewrite synthesized expressions.
if (Loc.isInvalid() || EndLoc.isInvalid())
return;
const char *startBuf = SM->getCharacterData(Loc);
const char *endBuf = SM->getCharacterData(EndLoc);
const char *startRef = nullptr, *endRef = nullptr;
if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) {
// Get the locations of the startRef, endRef.
SourceLocation LessLoc = Loc.getLocWithOffset(startRef-startBuf);
SourceLocation GreaterLoc = Loc.getLocWithOffset(endRef-startBuf+1);
// Comment out the protocol references.
InsertText(LessLoc, "/*");
InsertText(GreaterLoc, "*/");
}
}
}
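/// RewriteObjCQualifiedInterfaceTypes - Comment out any protocol qualifiers
/// appearing in the type of a variable, function (return type and argument
/// types), field, or typedef declaration.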
void RewriteModernObjC::RewriteObjCQualifiedInterfaceTypes(Decl *Dcl) {
SourceLocation Loc;
QualType Type;
const FunctionProtoType *proto = nullptr;
if (VarDecl *VD = dyn_cast<VarDecl>(Dcl)) {
Loc = VD->getLocation();
Type = VD->getType();
}
else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Dcl)) {
Loc = FD->getLocation();
// Check for ObjC 'id' and class types that have been adorned with protocol
// information (id<p>, C<p>*). The protocol references need to be rewritten!
const FunctionType *funcType = FD->getType()->getAs<FunctionType>();
assert(funcType && "missing function type");
proto = dyn_cast<FunctionProtoType>(funcType);
if (!proto)
return;
Type = proto->getReturnType();
}
else if (FieldDecl *FD = dyn_cast<FieldDecl>(Dcl)) {
Loc = FD->getLocation();
Type = FD->getType();
}
else if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(Dcl)) {
Loc = TD->getLocation();
Type = TD->getUnderlyingType();
}
else
return;
if (needToScanForQualifiers(Type)) {
// Since types are unique, we need to scan the buffer.
const char *endBuf = SM->getCharacterData(Loc);
const char *startBuf = endBuf;
while (*startBuf != ';' && *startBuf != '<' && startBuf != MainFileStart)
startBuf--; // scan backward (from the decl location) for return type.
const char *startRef = nullptr, *endRef = nullptr;
if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) {
// Get the locations of the startRef, endRef.
SourceLocation LessLoc = Loc.getLocWithOffset(startRef-endBuf);
SourceLocation GreaterLoc = Loc.getLocWithOffset(endRef-endBuf+1);
// Comment out the protocol references.
InsertText(LessLoc, "/*");
InsertText(GreaterLoc, "*/");
}
}
if (!proto)
return; // most likely, was a variable
// Now check arguments.
const char *startBuf = SM->getCharacterData(Loc);
const char *startFuncBuf = startBuf;
for (unsigned i = 0; i < proto->getNumParams(); i++) {
if (needToScanForQualifiers(proto->getParamType(i))) {
// Since types are unique, we need to scan the buffer.
const char *endBuf = startBuf;
// scan forward (from the decl location) for argument types.
scanToNextArgument(endBuf);
const char *startRef = nullptr, *endRef = nullptr;
if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) {
// Get the locations of the startRef, endRef.
SourceLocation LessLoc =
Loc.getLocWithOffset(startRef-startFuncBuf);
SourceLocation GreaterLoc =
Loc.getLocWithOffset(endRef-startFuncBuf+1);
// Comment out the protocol references.
InsertText(LessLoc, "/*");
InsertText(GreaterLoc, "*/");
}
startBuf = ++endBuf;
}
else {
// If the function name is derived from a macro expansion, then the
// argument buffer will not follow the name. Need to speak with Chris.
while (*startBuf && *startBuf != ')' && *startBuf != ',')
startBuf++; // scan forward (from the decl location) for argument types.
startBuf++;
}
}
}
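/// RewriteTypeOfDecl - Rewrite a variable declaration whose type is written
/// with __typeof__(expr) by spelling out the underlying type.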
void RewriteModernObjC::RewriteTypeOfDecl(VarDecl *ND) {
QualType QT = ND->getType();
const Type* TypePtr = QT->getAs<Type>();
if (!isa<TypeOfExprType>(TypePtr))
return;
while (isa<TypeOfExprType>(TypePtr)) {
const TypeOfExprType *TypeOfExprTypePtr = cast<TypeOfExprType>(TypePtr);
QT = TypeOfExprTypePtr->getUnderlyingExpr()->getType();
TypePtr = QT->getAs<Type>();
}
// FIXME. This will not work for multiple declarators; as in:
// __typeof__(a) b,c,d;
std::string TypeAsString(QT.getAsString(Context->getPrintingPolicy()));
SourceLocation DeclLoc = ND->getTypeSpecStartLoc();
const char *startBuf = SM->getCharacterData(DeclLoc);
if (ND->getInit()) {
std::string Name(ND->getNameAsString());
TypeAsString += " " + Name + " = ";
Expr *E = ND->getInit();
SourceLocation startLoc;
if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E))
startLoc = ECE->getLParenLoc();
else
startLoc = E->getLocStart();
startLoc = SM->getExpansionLoc(startLoc);
const char *endBuf = SM->getCharacterData(startLoc);
ReplaceText(DeclLoc, endBuf-startBuf-1, TypeAsString);
}
else {
SourceLocation X = ND->getLocEnd();
X = SM->getExpansionLoc(X);
const char *endBuf = SM->getCharacterData(X);
ReplaceText(DeclLoc, endBuf-startBuf-1, TypeAsString);
}
}
// SynthSelGetUidFunctionDecl - SEL sel_registerName(const char *str);
void RewriteModernObjC::SynthSelGetUidFunctionDecl() {
IdentifierInfo *SelGetUidIdent = &Context->Idents.get("sel_registerName");
SmallVector<QualType, 16> ArgTys;
ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
QualType getFuncType =
getSimpleFunctionType(Context->getObjCSelType(), ArgTys);
SelGetUidFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
SelGetUidIdent, getFuncType,
nullptr, SC_Extern);
}
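/// RewriteFunctionDecl - Record the declaration of sel_registerName() when
/// seen; otherwise rewrite any protocol-qualified types in the function's
/// signature.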
void RewriteModernObjC::RewriteFunctionDecl(FunctionDecl *FD) {
// declared in <objc/objc.h>
if (FD->getIdentifier() &&
FD->getName() == "sel_registerName") {
SelGetUidFunctionDecl = FD;
return;
}
RewriteObjCQualifiedInterfaceTypes(FD);
}
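/// RewriteBlockPointerType - Append the string form of Type to Str, rewriting
/// any block pointer '^' declarators into '*'.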
void RewriteModernObjC::RewriteBlockPointerType(std::string& Str, QualType Type) {
std::string TypeString(Type.getAsString(Context->getPrintingPolicy()));
const char *argPtr = TypeString.c_str();
if (!strchr(argPtr, '^')) {
Str += TypeString;
return;
}
while (*argPtr) {
Str += (*argPtr == '^' ? '*' : *argPtr);
argPtr++;
}
}
// FIXME. Consolidate this routine with RewriteBlockPointerType.
void RewriteModernObjC::RewriteBlockPointerTypeVariable(std::string& Str,
ValueDecl *VD) {
QualType Type = VD->getType();
std::string TypeString(Type.getAsString(Context->getPrintingPolicy()));
const char *argPtr = TypeString.c_str();
int paren = 0;
while (*argPtr) {
switch (*argPtr) {
case '(':
Str += *argPtr;
paren++;
break;
case ')':
Str += *argPtr;
paren--;
break;
case '^':
Str += '*';
if (paren == 1)
Str += VD->getNameAsString();
break;
default:
Str += *argPtr;
break;
}
argPtr++;
}
}
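/// RewriteBlockLiteralFunctionDecl - Insert a declaration for a function
/// whose signature mentions block pointer types, with those types rewritten
/// as plain pointers.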
void RewriteModernObjC::RewriteBlockLiteralFunctionDecl(FunctionDecl *FD) {
SourceLocation FunLocStart = FD->getTypeSpecStartLoc();
const FunctionType *funcType = FD->getType()->getAs<FunctionType>();
const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(funcType);
if (!proto)
return;
QualType Type = proto->getReturnType();
std::string FdStr = Type.getAsString(Context->getPrintingPolicy());
FdStr += " ";
FdStr += FD->getName();
FdStr += "(";
unsigned numArgs = proto->getNumParams();
for (unsigned i = 0; i < numArgs; i++) {
QualType ArgType = proto->getParamType(i);
RewriteBlockPointerType(FdStr, ArgType);
if (i+1 < numArgs)
FdStr += ", ";
}
if (FD->isVariadic()) {
FdStr += (numArgs > 0) ? ", ...);\n" : "...);\n";
}
else
FdStr += ");\n";
InsertText(FunLocStart, FdStr);
}
// SynthSuperConstructorFunctionDecl - id __rw_objc_super(id obj, id super);
void RewriteModernObjC::SynthSuperConstructorFunctionDecl() {
if (SuperConstructorFunctionDecl)
return;
IdentifierInfo *msgSendIdent = &Context->Idents.get("__rw_objc_super");
SmallVector<QualType, 16> ArgTys;
QualType argT = Context->getObjCIdType();
assert(!argT.isNull() && "Can't find 'id' type");
ArgTys.push_back(argT);
ArgTys.push_back(argT);
QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
ArgTys);
SuperConstructorFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
msgSendIdent, msgSendType,
nullptr, SC_Extern);
}
// SynthMsgSendFunctionDecl - id objc_msgSend(id self, SEL op, ...);
void RewriteModernObjC::SynthMsgSendFunctionDecl() {
IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend");
SmallVector<QualType, 16> ArgTys;
QualType argT = Context->getObjCIdType();
assert(!argT.isNull() && "Can't find 'id' type");
ArgTys.push_back(argT);
argT = Context->getObjCSelType();
assert(!argT.isNull() && "Can't find 'SEL' type");
ArgTys.push_back(argT);
QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
ArgTys, /*isVariadic=*/true);
MsgSendFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
msgSendIdent, msgSendType, nullptr,
SC_Extern);
}
// SynthMsgSendSuperFunctionDecl - id objc_msgSendSuper(void);
void RewriteModernObjC::SynthMsgSendSuperFunctionDecl() {
IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSendSuper");
SmallVector<QualType, 2> ArgTys;
ArgTys.push_back(Context->VoidTy);
QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
ArgTys, /*isVariadic=*/true);
MsgSendSuperFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
msgSendIdent, msgSendType,
nullptr, SC_Extern);
}
// SynthMsgSendStretFunctionDecl - id objc_msgSend_stret(id self, SEL op, ...);
void RewriteModernObjC::SynthMsgSendStretFunctionDecl() {
IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend_stret");
SmallVector<QualType, 16> ArgTys;
QualType argT = Context->getObjCIdType();
assert(!argT.isNull() && "Can't find 'id' type");
ArgTys.push_back(argT);
argT = Context->getObjCSelType();
assert(!argT.isNull() && "Can't find 'SEL' type");
ArgTys.push_back(argT);
QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
ArgTys, /*isVariadic=*/true);
MsgSendStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
msgSendIdent, msgSendType,
nullptr, SC_Extern);
}
// SynthMsgSendSuperStretFunctionDecl -
// id objc_msgSendSuper_stret(void);
void RewriteModernObjC::SynthMsgSendSuperStretFunctionDecl() {
IdentifierInfo *msgSendIdent =
&Context->Idents.get("objc_msgSendSuper_stret");
SmallVector<QualType, 2> ArgTys;
ArgTys.push_back(Context->VoidTy);
QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
ArgTys, /*isVariadic=*/true);
MsgSendSuperStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
msgSendIdent,
msgSendType, nullptr,
SC_Extern);
}
// SynthMsgSendFpretFunctionDecl - double objc_msgSend_fpret(id self, SEL op, ...);
void RewriteModernObjC::SynthMsgSendFpretFunctionDecl() {
IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend_fpret");
SmallVector<QualType, 16> ArgTys;
QualType argT = Context->getObjCIdType();
assert(!argT.isNull() && "Can't find 'id' type");
ArgTys.push_back(argT);
argT = Context->getObjCSelType();
assert(!argT.isNull() && "Can't find 'SEL' type");
ArgTys.push_back(argT);
QualType msgSendType = getSimpleFunctionType(Context->DoubleTy,
ArgTys, /*isVariadic=*/true);
MsgSendFpretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
msgSendIdent, msgSendType,
nullptr, SC_Extern);
}
// SynthGetClassFunctionDecl - Class objc_getClass(const char *name);
void RewriteModernObjC::SynthGetClassFunctionDecl() {
IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getClass");
SmallVector<QualType, 16> ArgTys;
ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
QualType getClassType = getSimpleFunctionType(Context->getObjCClassType(),
ArgTys);
GetClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
getClassIdent, getClassType,
nullptr, SC_Extern);
}
// SynthGetSuperClassFunctionDecl - Class class_getSuperclass(Class cls);
void RewriteModernObjC::SynthGetSuperClassFunctionDecl() {
IdentifierInfo *getSuperClassIdent =
&Context->Idents.get("class_getSuperclass");
SmallVector<QualType, 16> ArgTys;
ArgTys.push_back(Context->getObjCClassType());
QualType getClassType = getSimpleFunctionType(Context->getObjCClassType(),
ArgTys);
GetSuperClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
getSuperClassIdent,
getClassType, nullptr,
SC_Extern);
}
// SynthGetMetaClassFunctionDecl - Class objc_getMetaClass(const char *name);
void RewriteModernObjC::SynthGetMetaClassFunctionDecl() {
IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getMetaClass");
SmallVector<QualType, 16> ArgTys;
ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
QualType getClassType = getSimpleFunctionType(Context->getObjCClassType(),
ArgTys);
GetMetaClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SourceLocation(),
getClassIdent, getClassType,
nullptr, SC_Extern);
}
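/// RewriteObjCStringLiteral - Rewrite an Objective-C string literal into a
/// reference to a static __NSConstantStringImpl object emitted into the
/// preamble.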
Stmt *RewriteModernObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) {
assert (Exp != nullptr && "Expected non-null ObjCStringLiteral");
QualType strType = getConstantStringStructType();
std::string S = "__NSConstantStringImpl_";
std::string tmpName = InFileName;
unsigned i;
for (i=0; i < tmpName.length(); i++) {
char c = tmpName.at(i);
// replace any non-alphanumeric characters with '_'.
if (!isAlphanumeric(c))
tmpName[i] = '_';
}
S += tmpName;
S += "_";
S += utostr(NumObjCStringLiterals++);
Preamble += "static __NSConstantStringImpl " + S;
Preamble += " __attribute__ ((section (\"__DATA, __cfstring\"))) = {__CFConstantStringClassReference,";
Preamble += "0x000007c8,"; // utf8_str
// The pretty printer for StringLiteral handles escape characters properly.
std::string prettyBufS;
llvm::raw_string_ostream prettyBuf(prettyBufS);
Exp->getString()->printPretty(prettyBuf, nullptr, PrintingPolicy(LangOpts));
Preamble += prettyBuf.str();
Preamble += ",";
Preamble += utostr(Exp->getString()->getByteLength()) + "};\n";
VarDecl *NewVD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
SourceLocation(), &Context->Idents.get(S),
strType, nullptr, SC_Static);
DeclRefExpr *DRE = new (Context) DeclRefExpr(NewVD, false, strType, VK_LValue,
SourceLocation());
Expr *Unop = new (Context) UnaryOperator(DRE, UO_AddrOf,
Context->getPointerType(DRE->getType()),
VK_RValue, OK_Ordinary,
SourceLocation());
// cast to NSConstantString *
CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Exp->getType(),
CK_CPointerToObjCPointerCast, Unop);
ReplaceStmt(Exp, cast);
// delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
return cast;
}
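/// RewriteObjCBoolLiteralExpr - Rewrite an ObjC BOOL literal (__objc_yes or
/// __objc_no) into an integer literal cast to the builtin BOOL type.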
Stmt *RewriteModernObjC::RewriteObjCBoolLiteralExpr(ObjCBoolLiteralExpr *Exp) {
unsigned IntSize =
static_cast<unsigned>(Context->getTypeSize(Context->IntTy));
Expr *FlagExp = IntegerLiteral::Create(*Context,
llvm::APInt(IntSize, Exp->getValue()),
Context->IntTy, Exp->getLocation());
CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Context->ObjCBuiltinBoolTy,
CK_BitCast, FlagExp);
ParenExpr *PE = new (Context) ParenExpr(Exp->getLocation(), Exp->getExprLoc(),
cast);
ReplaceStmt(Exp, PE);
return PE;
}
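/// RewriteObjCBoxedExpr - Rewrite a boxed expression such as @(expr) into an
/// objc_msgSend call of the boxing method on the boxing class.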
Stmt *RewriteModernObjC::RewriteObjCBoxedExpr(ObjCBoxedExpr *Exp) {
// synthesize declaration of helper functions needed in this routine.
if (!SelGetUidFunctionDecl)
SynthSelGetUidFunctionDecl();
// use objc_msgSend() for all.
if (!MsgSendFunctionDecl)
SynthMsgSendFunctionDecl();
if (!GetClassFunctionDecl)
SynthGetClassFunctionDecl();
FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl;
SourceLocation StartLoc = Exp->getLocStart();
SourceLocation EndLoc = Exp->getLocEnd();
// Synthesize a call to objc_msgSend().
SmallVector<Expr*, 4> MsgExprs;
SmallVector<Expr*, 4> ClsExprs;
// Create a call to objc_getClass("<BoxingClass>"). It will be the 1st argument.
ObjCMethodDecl *BoxingMethod = Exp->getBoxingMethod();
ObjCInterfaceDecl *BoxingClass = BoxingMethod->getClassInterface();
IdentifierInfo *clsName = BoxingClass->getIdentifier();
ClsExprs.push_back(getStringLiteral(clsName->getName()));
CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
&ClsExprs[0],
ClsExprs.size(),
StartLoc, EndLoc);
MsgExprs.push_back(Cls);
// Create a call to sel_registerName("<BoxingMethod>:"), etc.
// it will be the 2nd argument.
SmallVector<Expr*, 4> SelExprs;
SelExprs.push_back(
getStringLiteral(BoxingMethod->getSelector().getAsString()));
CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
&SelExprs[0], SelExprs.size(),
StartLoc, EndLoc);
MsgExprs.push_back(SelExp);
// User provided sub-expression is the 3rd, and last, argument.
Expr *subExpr = Exp->getSubExpr();
if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(subExpr)) {
QualType type = ICE->getType();
const Expr *SubExpr = ICE->IgnoreParenImpCasts();
CastKind CK = CK_BitCast;
if (SubExpr->getType()->isIntegralType(*Context) && type->isBooleanType())
CK = CK_IntegralToBoolean;
subExpr = NoTypeInfoCStyleCastExpr(Context, type, CK, subExpr);
}
MsgExprs.push_back(subExpr);
SmallVector<QualType, 4> ArgTypes;
ArgTypes.push_back(Context->getObjCClassType());
ArgTypes.push_back(Context->getObjCSelType());
for (const auto PI : BoxingMethod->parameters())
ArgTypes.push_back(PI->getType());
QualType returnType = Exp->getType();
// Get the type, we will need to reference it in a couple spots.
QualType msgSendType = MsgSendFlavor->getType();
// Create a reference to the objc_msgSend() declaration.
DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType,
VK_LValue, SourceLocation());
CastExpr *cast = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(Context->VoidTy),
CK_BitCast, DRE);
// Now do the "normal" pointer to function cast.
QualType castType =
getSimpleFunctionType(returnType, ArgTypes, BoxingMethod->isVariadic());
castType = Context->getPointerType(castType);
cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
cast);
// Don't forget the parens to enforce the proper binding.
ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
const FunctionType *FT = msgSendType->getAs<FunctionType>();
CallExpr *CE = new (Context)
CallExpr(*Context, PE, MsgExprs, FT->getReturnType(), VK_RValue, EndLoc);
ReplaceStmt(Exp, CE);
return CE;
}
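/// RewriteObjCArrayLiteralExpr - Rewrite an array literal @[...] into an
/// objc_msgSend call of arrayWithObjects:count:, marshalling the elements
/// through the __NSContainer_literal helper.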
Stmt *RewriteModernObjC::RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp) {
// synthesize declaration of helper functions needed in this routine.
if (!SelGetUidFunctionDecl)
SynthSelGetUidFunctionDecl();
// use objc_msgSend() for all.
if (!MsgSendFunctionDecl)
SynthMsgSendFunctionDecl();
if (!GetClassFunctionDecl)
SynthGetClassFunctionDecl();
FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl;
SourceLocation StartLoc = Exp->getLocStart();
SourceLocation EndLoc = Exp->getLocEnd();
// Build the expression: __NSContainer_literal(int, ...).arr
QualType IntQT = Context->IntTy;
QualType NSArrayFType =
getSimpleFunctionType(Context->VoidTy, IntQT, true);
std::string NSArrayFName("__NSContainer_literal");
FunctionDecl *NSArrayFD = SynthBlockInitFunctionDecl(NSArrayFName);
DeclRefExpr *NSArrayDRE =
new (Context) DeclRefExpr(NSArrayFD, false, NSArrayFType, VK_RValue,
SourceLocation());
SmallVector<Expr*, 16> InitExprs;
unsigned NumElements = Exp->getNumElements();
unsigned UnsignedIntSize =
static_cast<unsigned>(Context->getTypeSize(Context->UnsignedIntTy));
Expr *count = IntegerLiteral::Create(*Context,
llvm::APInt(UnsignedIntSize, NumElements),
Context->UnsignedIntTy, SourceLocation());
InitExprs.push_back(count);
for (unsigned i = 0; i < NumElements; i++)
InitExprs.push_back(Exp->getElement(i));
Expr *NSArrayCallExpr =
new (Context) CallExpr(*Context, NSArrayDRE, InitExprs,
NSArrayFType, VK_LValue, SourceLocation());
FieldDecl *ARRFD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
SourceLocation(),
&Context->Idents.get("arr"),
Context->getPointerType(Context->VoidPtrTy),
nullptr, /*BitWidth=*/nullptr,
/*Mutable=*/true, ICIS_NoInit);
MemberExpr *ArrayLiteralME = new (Context)
MemberExpr(NSArrayCallExpr, false, SourceLocation(), ARRFD,
SourceLocation(), ARRFD->getType(), VK_LValue, OK_Ordinary);
QualType ConstIdT = Context->getObjCIdType().withConst();
CStyleCastExpr * ArrayLiteralObjects =
NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(ConstIdT),
CK_BitCast,
ArrayLiteralME);
// Synthesize a call to objc_msgSend().
SmallVector<Expr*, 32> MsgExprs;
SmallVector<Expr*, 4> ClsExprs;
QualType expType = Exp->getType();
// Create a call to objc_getClass("NSArray"). It will be th 1st argument.
ObjCInterfaceDecl *Class =
expType->getPointeeType()->getAs<ObjCObjectType>()->getInterface();
IdentifierInfo *clsName = Class->getIdentifier();
ClsExprs.push_back(getStringLiteral(clsName->getName()));
CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
&ClsExprs[0],
ClsExprs.size(),
StartLoc, EndLoc);
MsgExprs.push_back(Cls);
// Create a call to sel_registerName("arrayWithObjects:count:").
// it will be the 2nd argument.
SmallVector<Expr*, 4> SelExprs;
ObjCMethodDecl *ArrayMethod = Exp->getArrayWithObjectsMethod();
SelExprs.push_back(
getStringLiteral(ArrayMethod->getSelector().getAsString()));
CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
&SelExprs[0], SelExprs.size(),
StartLoc, EndLoc);
MsgExprs.push_back(SelExp);
// (const id [])objects
MsgExprs.push_back(ArrayLiteralObjects);
// (NSUInteger)cnt
Expr *cnt = IntegerLiteral::Create(*Context,
llvm::APInt(UnsignedIntSize, NumElements),
Context->UnsignedIntTy, SourceLocation());
MsgExprs.push_back(cnt);
SmallVector<QualType, 4> ArgTypes;
ArgTypes.push_back(Context->getObjCClassType());
ArgTypes.push_back(Context->getObjCSelType());
for (const auto *PI : ArrayMethod->params())
ArgTypes.push_back(PI->getType());
QualType returnType = Exp->getType();
// Get the type, we will need to reference it in a couple spots.
QualType msgSendType = MsgSendFlavor->getType();
// Create a reference to the objc_msgSend() declaration.
DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType,
VK_LValue, SourceLocation());
CastExpr *cast = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(Context->VoidTy),
CK_BitCast, DRE);
// Now do the "normal" pointer to function cast.
QualType castType =
getSimpleFunctionType(returnType, ArgTypes, ArrayMethod->isVariadic());
castType = Context->getPointerType(castType);
cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
cast);
// Don't forget the parens to enforce the proper binding.
ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
const FunctionType *FT = msgSendType->getAs<FunctionType>();
CallExpr *CE = new (Context)
CallExpr(*Context, PE, MsgExprs, FT->getReturnType(), VK_RValue, EndLoc);
ReplaceStmt(Exp, CE);
return CE;
}
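/// RewriteObjCDictionaryLiteralExpr - Rewrite a dictionary literal @{...}
/// into an objc_msgSend call of the dictionary construction method,
/// marshalling keys and values through the __NSContainer_literal helper.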
Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral *Exp) {
// synthesize declaration of helper functions needed in this routine.
if (!SelGetUidFunctionDecl)
SynthSelGetUidFunctionDecl();
// use objc_msgSend() for all.
if (!MsgSendFunctionDecl)
SynthMsgSendFunctionDecl();
if (!GetClassFunctionDecl)
SynthGetClassFunctionDecl();
FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl;
SourceLocation StartLoc = Exp->getLocStart();
SourceLocation EndLoc = Exp->getLocEnd();
// Build the expression: __NSContainer_literal(int, ...).arr
QualType IntQT = Context->IntTy;
QualType NSDictFType =
getSimpleFunctionType(Context->VoidTy, IntQT, true);
std::string NSDictFName("__NSContainer_literal");
FunctionDecl *NSDictFD = SynthBlockInitFunctionDecl(NSDictFName);
DeclRefExpr *NSDictDRE =
new (Context) DeclRefExpr(NSDictFD, false, NSDictFType, VK_RValue,
SourceLocation());
SmallVector<Expr*, 16> KeyExprs;
SmallVector<Expr*, 16> ValueExprs;
unsigned NumElements = Exp->getNumElements();
unsigned UnsignedIntSize =
static_cast<unsigned>(Context->getTypeSize(Context->UnsignedIntTy));
Expr *count = IntegerLiteral::Create(*Context,
llvm::APInt(UnsignedIntSize, NumElements),
Context->UnsignedIntTy, SourceLocation());
KeyExprs.push_back(count);
ValueExprs.push_back(count);
for (unsigned i = 0; i < NumElements; i++) {
ObjCDictionaryElement Element = Exp->getKeyValueElement(i);
KeyExprs.push_back(Element.Key);
ValueExprs.push_back(Element.Value);
}
// (const id [])objects
Expr *NSValueCallExpr =
new (Context) CallExpr(*Context, NSDictDRE, ValueExprs,
NSDictFType, VK_LValue, SourceLocation());
FieldDecl *ARRFD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
SourceLocation(),
&Context->Idents.get("arr"),
Context->getPointerType(Context->VoidPtrTy),
nullptr, /*BitWidth=*/nullptr,
/*Mutable=*/true, ICIS_NoInit);
MemberExpr *DictLiteralValueME = new (Context)
MemberExpr(NSValueCallExpr, false, SourceLocation(), ARRFD,
SourceLocation(), ARRFD->getType(), VK_LValue, OK_Ordinary);
QualType ConstIdT = Context->getObjCIdType().withConst();
CStyleCastExpr * DictValueObjects =
NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(ConstIdT),
CK_BitCast,
DictLiteralValueME);
// (const id <NSCopying> [])keys
Expr *NSKeyCallExpr =
new (Context) CallExpr(*Context, NSDictDRE, KeyExprs,
NSDictFType, VK_LValue, SourceLocation());
MemberExpr *DictLiteralKeyME = new (Context)
MemberExpr(NSKeyCallExpr, false, SourceLocation(), ARRFD,
SourceLocation(), ARRFD->getType(), VK_LValue, OK_Ordinary);
CStyleCastExpr * DictKeyObjects =
NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(ConstIdT),
CK_BitCast,
DictLiteralKeyME);
// Synthesize a call to objc_msgSend().
SmallVector<Expr*, 32> MsgExprs;
SmallVector<Expr*, 4> ClsExprs;
QualType expType = Exp->getType();
// Create a call to objc_getClass("NSArray"). It will be th 1st argument.
ObjCInterfaceDecl *Class =
expType->getPointeeType()->getAs<ObjCObjectType>()->getInterface();
IdentifierInfo *clsName = Class->getIdentifier();
ClsExprs.push_back(getStringLiteral(clsName->getName()));
CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
&ClsExprs[0],
ClsExprs.size(),
StartLoc, EndLoc);
MsgExprs.push_back(Cls);
// Create a call to sel_registerName("arrayWithObjects:count:").
// it will be the 2nd argument.
SmallVector<Expr*, 4> SelExprs;
ObjCMethodDecl *DictMethod = Exp->getDictWithObjectsMethod();
SelExprs.push_back(getStringLiteral(DictMethod->getSelector().getAsString()));
CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
&SelExprs[0], SelExprs.size(),
StartLoc, EndLoc);
MsgExprs.push_back(SelExp);
// (const id [])objects
MsgExprs.push_back(DictValueObjects);
// (const id <NSCopying> [])keys
MsgExprs.push_back(DictKeyObjects);
// (NSUInteger)cnt
Expr *cnt = IntegerLiteral::Create(*Context,
llvm::APInt(UnsignedIntSize, NumElements),
Context->UnsignedIntTy, SourceLocation());
MsgExprs.push_back(cnt);
SmallVector<QualType, 8> ArgTypes;
ArgTypes.push_back(Context->getObjCClassType());
ArgTypes.push_back(Context->getObjCSelType());
for (const auto *PI : DictMethod->params()) {
QualType T = PI->getType();
if (const PointerType* PT = T->getAs<PointerType>()) {
QualType PointeeTy = PT->getPointeeType();
convertToUnqualifiedObjCType(PointeeTy);
T = Context->getPointerType(PointeeTy);
}
ArgTypes.push_back(T);
}
QualType returnType = Exp->getType();
// Get the type, we will need to reference it in a couple spots.
QualType msgSendType = MsgSendFlavor->getType();
// Create a reference to the objc_msgSend() declaration.
DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType,
VK_LValue, SourceLocation());
CastExpr *cast = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(Context->VoidTy),
CK_BitCast, DRE);
// Now do the "normal" pointer to function cast.
QualType castType =
getSimpleFunctionType(returnType, ArgTypes, DictMethod->isVariadic());
castType = Context->getPointerType(castType);
cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
cast);
// Don't forget the parens to enforce the proper binding.
ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
const FunctionType *FT = msgSendType->getAs<FunctionType>();
CallExpr *CE = new (Context)
CallExpr(*Context, PE, MsgExprs, FT->getReturnType(), VK_RValue, EndLoc);
ReplaceStmt(Exp, CE);
return CE;
}
// struct __rw_objc_super {
// struct objc_object *object; struct objc_object *superClass;
// };
QualType RewriteModernObjC::getSuperStructType() {
if (!SuperStructDecl) {
SuperStructDecl = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get("__rw_objc_super"));
QualType FieldTypes[2];
// struct objc_object *object;
FieldTypes[0] = Context->getObjCIdType();
// struct objc_object *superClass;
FieldTypes[1] = Context->getObjCIdType();
// Create fields
for (unsigned i = 0; i < 2; ++i) {
SuperStructDecl->addDecl(FieldDecl::Create(*Context, SuperStructDecl,
SourceLocation(),
SourceLocation(), nullptr,
FieldTypes[i], nullptr,
/*BitWidth=*/nullptr,
/*Mutable=*/false,
ICIS_NoInit));
}
SuperStructDecl->completeDefinition();
}
return Context->getTagDeclType(SuperStructDecl);
}
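// struct __NSConstantStringImpl {
//   struct objc_object *receiver;
//   int flags;
//   char *str;
//   long length;
// };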
QualType RewriteModernObjC::getConstantStringStructType() {
if (!ConstantStringDecl) {
ConstantStringDecl = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get("__NSConstantStringImpl"));
QualType FieldTypes[4];
// struct objc_object *receiver;
FieldTypes[0] = Context->getObjCIdType();
// int flags;
FieldTypes[1] = Context->IntTy;
// char *str;
FieldTypes[2] = Context->getPointerType(Context->CharTy);
// long length;
FieldTypes[3] = Context->LongTy;
// Create fields
for (unsigned i = 0; i < 4; ++i) {
ConstantStringDecl->addDecl(FieldDecl::Create(*Context,
ConstantStringDecl,
SourceLocation(),
SourceLocation(), nullptr,
FieldTypes[i], nullptr,
/*BitWidth=*/nullptr,
/*Mutable=*/true,
ICIS_NoInit));
}
ConstantStringDecl->completeDefinition();
}
return Context->getTagDeclType(ConstantStringDecl);
}
/// getFunctionSourceLocation - Returns the start location of a function
/// definition. A complication arises when the function is declared within an
/// extern "C" or extern "C" {...} linkage specification.
static SourceLocation getFunctionSourceLocation (RewriteModernObjC &R,
FunctionDecl *FD) {
if (FD->isExternC() && !FD->isMain()) {
const DeclContext *DC = FD->getDeclContext();
if (const LinkageSpecDecl *LSD = dyn_cast<LinkageSpecDecl>(DC))
      // For the unbraced extern "C" form, return the 'extern' keyword location.
if (!LSD->getRBraceLoc().isValid())
return LSD->getExternLoc();
}
if (FD->getStorageClass() != SC_None)
R.RewriteBlockLiteralFunctionDecl(FD);
return FD->getTypeSpecStartLoc();
}
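/// RewriteLineDirective - Insert a #line directive at the start of the given
/// declaration so the rewritten output maps back to the original source.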
void RewriteModernObjC::RewriteLineDirective(const Decl *D) {
SourceLocation Location = D->getLocation();
if (Location.isFileID() && GenerateLineInfo) {
std::string LineString("\n#line ");
PresumedLoc PLoc = SM->getPresumedLoc(Location);
LineString += utostr(PLoc.getLine());
LineString += " \"";
LineString += Lexer::Stringify(PLoc.getFilename());
if (isa<ObjCMethodDecl>(D))
LineString += "\"";
else LineString += "\"\n";
Location = D->getLocStart();
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
if (FD->isExternC() && !FD->isMain()) {
const DeclContext *DC = FD->getDeclContext();
if (const LinkageSpecDecl *LSD = dyn_cast<LinkageSpecDecl>(DC))
          // For the unbraced extern "C" form, use the 'extern' keyword location.
if (!LSD->getRBraceLoc().isValid())
Location = LSD->getExternLoc();
}
}
InsertText(Location, LineString);
}
}
/// SynthMsgSendStretCallExpr - This routine translates a message expression
/// into a call to the objc_msgSend_stret() entry point. The tricky part is
/// that the nil check on the receiver must be performed before calling
/// objc_msgSend_stret.
/// MsgSendStretFlavor - function declaration of objc_msgSend_stret(...)
/// returnType - result type of the method being synthesized.
/// ArgTypes - types of the arguments passed to objc_msgSend_stret, starting
/// with the receiver type.
/// MsgExprs - list of argument expressions passed to objc_msgSend_stret,
/// starting with the receiver.
/// Method - the method being rewritten.
Expr *RewriteModernObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavor,
QualType returnType,
SmallVectorImpl<QualType> &ArgTypes,
SmallVectorImpl<Expr*> &MsgExprs,
ObjCMethodDecl *Method) {
// Now do the "normal" pointer to function cast.
QualType castType = getSimpleFunctionType(returnType, ArgTypes,
Method ? Method->isVariadic()
: false);
castType = Context->getPointerType(castType);
// build type for containing the objc_msgSend_stret object.
static unsigned stretCount=0;
std::string name = "__Stret"; name += utostr(stretCount);
std::string str =
"extern \"C\" void * __cdecl memset(void *_Dst, int _Val, size_t _Size);\n";
str += "namespace {\n";
str += "struct "; str += name;
str += " {\n\t";
str += name;
str += "(id receiver, SEL sel";
for (unsigned i = 2; i < ArgTypes.size(); i++) {
std::string ArgName = "arg"; ArgName += utostr(i);
ArgTypes[i].getAsStringInternal(ArgName, Context->getPrintingPolicy());
str += ", "; str += ArgName;
}
// could be vararg.
for (unsigned i = ArgTypes.size(); i < MsgExprs.size(); i++) {
std::string ArgName = "arg"; ArgName += utostr(i);
MsgExprs[i]->getType().getAsStringInternal(ArgName,
Context->getPrintingPolicy());
str += ", "; str += ArgName;
}
str += ") {\n";
str += "\t unsigned size = sizeof(";
str += returnType.getAsString(Context->getPrintingPolicy()); str += ");\n";
str += "\t if (size == 1 || size == 2 || size == 4 || size == 8)\n";
str += "\t s = (("; str += castType.getAsString(Context->getPrintingPolicy());
str += ")(void *)objc_msgSend)(receiver, sel";
for (unsigned i = 2; i < ArgTypes.size(); i++) {
str += ", arg"; str += utostr(i);
}
// could be vararg.
for (unsigned i = ArgTypes.size(); i < MsgExprs.size(); i++) {
str += ", arg"; str += utostr(i);
}
str+= ");\n";
str += "\t else if (receiver == 0)\n";
str += "\t memset((void*)&s, 0, sizeof(s));\n";
str += "\t else\n";
str += "\t s = (("; str += castType.getAsString(Context->getPrintingPolicy());
str += ")(void *)objc_msgSend_stret)(receiver, sel";
for (unsigned i = 2; i < ArgTypes.size(); i++) {
str += ", arg"; str += utostr(i);
}
// could be vararg.
for (unsigned i = ArgTypes.size(); i < MsgExprs.size(); i++) {
str += ", arg"; str += utostr(i);
}
str += ");\n";
str += "\t}\n";
str += "\t"; str += returnType.getAsString(Context->getPrintingPolicy());
str += " s;\n";
str += "};\n};\n\n";
SourceLocation FunLocStart;
if (CurFunctionDef)
FunLocStart = getFunctionSourceLocation(*this, CurFunctionDef);
else {
assert(CurMethodDef && "SynthMsgSendStretCallExpr - CurMethodDef is null");
FunLocStart = CurMethodDef->getLocStart();
}
InsertText(FunLocStart, str);
++stretCount;
  // Build the AST for: __Stret<n>(receiver, sel, args...).s;
IdentifierInfo *ID = &Context->Idents.get(name);
FunctionDecl *FD = FunctionDecl::Create(*Context, TUDecl, SourceLocation(),
SourceLocation(), ID, castType,
nullptr, SC_Extern, false, false);
DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, castType, VK_RValue,
SourceLocation());
CallExpr *STCE = new (Context) CallExpr(*Context, DRE, MsgExprs,
castType, VK_LValue, SourceLocation());
FieldDecl *FieldD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
SourceLocation(),
&Context->Idents.get("s"),
returnType, nullptr,
/*BitWidth=*/nullptr,
/*Mutable=*/true, ICIS_NoInit);
MemberExpr *ME = new (Context)
MemberExpr(STCE, false, SourceLocation(), FieldD, SourceLocation(),
FieldD->getType(), VK_LValue, OK_Ordinary);
return ME;
}
Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
SourceLocation StartLoc,
SourceLocation EndLoc) {
if (!SelGetUidFunctionDecl)
SynthSelGetUidFunctionDecl();
if (!MsgSendFunctionDecl)
SynthMsgSendFunctionDecl();
if (!MsgSendSuperFunctionDecl)
SynthMsgSendSuperFunctionDecl();
if (!MsgSendStretFunctionDecl)
SynthMsgSendStretFunctionDecl();
if (!MsgSendSuperStretFunctionDecl)
SynthMsgSendSuperStretFunctionDecl();
if (!MsgSendFpretFunctionDecl)
SynthMsgSendFpretFunctionDecl();
if (!GetClassFunctionDecl)
SynthGetClassFunctionDecl();
if (!GetSuperClassFunctionDecl)
SynthGetSuperClassFunctionDecl();
if (!GetMetaClassFunctionDecl)
SynthGetMetaClassFunctionDecl();
// default to objc_msgSend().
FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl;
// May need to use objc_msgSend_stret() as well.
FunctionDecl *MsgSendStretFlavor = nullptr;
if (ObjCMethodDecl *mDecl = Exp->getMethodDecl()) {
QualType resultType = mDecl->getReturnType();
if (resultType->isRecordType())
MsgSendStretFlavor = MsgSendStretFunctionDecl;
else if (resultType->isRealFloatingType())
MsgSendFlavor = MsgSendFpretFunctionDecl;
}
// Synthesize a call to objc_msgSend().
SmallVector<Expr*, 8> MsgExprs;
switch (Exp->getReceiverKind()) {
case ObjCMessageExpr::SuperClass: {
MsgSendFlavor = MsgSendSuperFunctionDecl;
if (MsgSendStretFlavor)
MsgSendStretFlavor = MsgSendSuperStretFunctionDecl;
assert(MsgSendFlavor && "MsgSendFlavor is NULL!");
ObjCInterfaceDecl *ClassDecl = CurMethodDef->getClassInterface();
SmallVector<Expr*, 4> InitExprs;
// set the receiver to self, the first argument to all methods.
InitExprs.push_back(
NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
CK_BitCast,
new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(),
false,
Context->getObjCIdType(),
VK_RValue,
SourceLocation()))
); // set the 'receiver'.
    // (id)class_getSuperclass((Class)objc_getMetaClass("CurrentClass"))
SmallVector<Expr*, 8> ClsExprs;
ClsExprs.push_back(getStringLiteral(ClassDecl->getIdentifier()->getName()));
    // (Class)objc_getMetaClass("CurrentClass")
CallExpr *Cls = SynthesizeCallToFunctionDecl(GetMetaClassFunctionDecl,
&ClsExprs[0],
ClsExprs.size(),
StartLoc,
EndLoc);
ClsExprs.clear();
ClsExprs.push_back(Cls);
Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl,
&ClsExprs[0], ClsExprs.size(),
StartLoc, EndLoc);
    // (id)class_getSuperclass((Class)objc_getMetaClass("CurrentClass"))
// To turn off a warning, type-cast to 'id'
InitExprs.push_back( // set 'super class', using class_getSuperclass().
NoTypeInfoCStyleCastExpr(Context,
Context->getObjCIdType(),
CK_BitCast, Cls));
// struct __rw_objc_super
QualType superType = getSuperStructType();
Expr *SuperRep;
if (LangOpts.MicrosoftExt) {
SynthSuperConstructorFunctionDecl();
// Simulate a constructor call...
DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperConstructorFunctionDecl,
false, superType, VK_LValue,
SourceLocation());
SuperRep = new (Context) CallExpr(*Context, DRE, InitExprs,
superType, VK_LValue,
SourceLocation());
      // The code for super is a little tricky to prevent collision with
      // the structure definition in the header. The rewriter has its own
      // internal definition (__rw_objc_super) that it uses. This is why
      // we need the cast below. For example:
      // (struct __rw_objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER"))
      //
SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
Context->getPointerType(SuperRep->getType()),
VK_RValue, OK_Ordinary,
SourceLocation());
SuperRep = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(superType),
CK_BitCast, SuperRep);
} else {
// (struct __rw_objc_super) { <exprs from above> }
InitListExpr *ILE =
new (Context) InitListExpr(*Context, SourceLocation(), InitExprs,
SourceLocation());
TypeSourceInfo *superTInfo
= Context->getTrivialTypeSourceInfo(superType);
SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superTInfo,
superType, VK_LValue,
ILE, false);
// struct __rw_objc_super *
SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
Context->getPointerType(SuperRep->getType()),
VK_RValue, OK_Ordinary,
SourceLocation());
}
MsgExprs.push_back(SuperRep);
break;
}
case ObjCMessageExpr::Class: {
SmallVector<Expr*, 8> ClsExprs;
ObjCInterfaceDecl *Class
= Exp->getClassReceiver()->getAs<ObjCObjectType>()->getInterface();
IdentifierInfo *clsName = Class->getIdentifier();
ClsExprs.push_back(getStringLiteral(clsName->getName()));
CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
&ClsExprs[0],
ClsExprs.size(),
StartLoc, EndLoc);
CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context,
Context->getObjCIdType(),
CK_BitCast, Cls);
MsgExprs.push_back(ArgExpr);
break;
}
case ObjCMessageExpr::SuperInstance:{
MsgSendFlavor = MsgSendSuperFunctionDecl;
if (MsgSendStretFlavor)
MsgSendStretFlavor = MsgSendSuperStretFunctionDecl;
assert(MsgSendFlavor && "MsgSendFlavor is NULL!");
ObjCInterfaceDecl *ClassDecl = CurMethodDef->getClassInterface();
SmallVector<Expr*, 4> InitExprs;
InitExprs.push_back(
NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
CK_BitCast,
new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(),
false,
Context->getObjCIdType(),
VK_RValue, SourceLocation()))
); // set the 'receiver'.
// (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
SmallVector<Expr*, 8> ClsExprs;
ClsExprs.push_back(getStringLiteral(ClassDecl->getIdentifier()->getName()));
// (Class)objc_getClass("CurrentClass")
CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
&ClsExprs[0],
ClsExprs.size(),
StartLoc, EndLoc);
ClsExprs.clear();
ClsExprs.push_back(Cls);
Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl,
&ClsExprs[0], ClsExprs.size(),
StartLoc, EndLoc);
// (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
// To turn off a warning, type-cast to 'id'
InitExprs.push_back(
// set 'super class', using class_getSuperclass().
NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
CK_BitCast, Cls));
// struct __rw_objc_super
QualType superType = getSuperStructType();
Expr *SuperRep;
if (LangOpts.MicrosoftExt) {
SynthSuperConstructorFunctionDecl();
// Simulate a constructor call...
DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperConstructorFunctionDecl,
false, superType, VK_LValue,
SourceLocation());
SuperRep = new (Context) CallExpr(*Context, DRE, InitExprs,
superType, VK_LValue, SourceLocation());
      // The code for super is a little tricky to prevent collision with
      // the structure definition in the header. The rewriter has its own
      // internal definition (__rw_objc_super) that it uses. This is why
      // we need the cast below. For example:
      // (struct __rw_objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER"))
      //
SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
Context->getPointerType(SuperRep->getType()),
VK_RValue, OK_Ordinary,
SourceLocation());
SuperRep = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(superType),
CK_BitCast, SuperRep);
} else {
// (struct __rw_objc_super) { <exprs from above> }
InitListExpr *ILE =
new (Context) InitListExpr(*Context, SourceLocation(), InitExprs,
SourceLocation());
TypeSourceInfo *superTInfo
= Context->getTrivialTypeSourceInfo(superType);
SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superTInfo,
superType, VK_RValue, ILE,
false);
}
MsgExprs.push_back(SuperRep);
break;
}
case ObjCMessageExpr::Instance: {
    // Remove all type-casts because they may contain objc-style types; e.g.
    // Foo<Proto> *.
Expr *recExpr = Exp->getInstanceReceiver();
while (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(recExpr))
recExpr = CE->getSubExpr();
CastKind CK = recExpr->getType()->isObjCObjectPointerType()
? CK_BitCast : recExpr->getType()->isBlockPointerType()
? CK_BlockPointerToObjCPointerCast
: CK_CPointerToObjCPointerCast;
recExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
CK, recExpr);
MsgExprs.push_back(recExpr);
break;
}
}
  // Create a call to sel_registerName("selName"); it will be the 2nd argument.
SmallVector<Expr*, 8> SelExprs;
SelExprs.push_back(getStringLiteral(Exp->getSelector().getAsString()));
CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
&SelExprs[0], SelExprs.size(),
StartLoc,
EndLoc);
MsgExprs.push_back(SelExp);
// Now push any user supplied arguments.
for (unsigned i = 0; i < Exp->getNumArgs(); i++) {
Expr *userExpr = Exp->getArg(i);
// Make all implicit casts explicit...ICE comes in handy:-)
if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(userExpr)) {
// Reuse the ICE type, it is exactly what the doctor ordered.
QualType type = ICE->getType();
if (needToScanForQualifiers(type))
type = Context->getObjCIdType();
// Make sure we convert "type (^)(...)" to "type (*)(...)".
(void)convertBlockPointerToFunctionPointer(type);
const Expr *SubExpr = ICE->IgnoreParenImpCasts();
CastKind CK;
if (SubExpr->getType()->isIntegralType(*Context) &&
type->isBooleanType()) {
CK = CK_IntegralToBoolean;
} else if (type->isObjCObjectPointerType()) {
if (SubExpr->getType()->isBlockPointerType()) {
CK = CK_BlockPointerToObjCPointerCast;
} else if (SubExpr->getType()->isPointerType()) {
CK = CK_CPointerToObjCPointerCast;
} else {
CK = CK_BitCast;
}
} else {
CK = CK_BitCast;
}
userExpr = NoTypeInfoCStyleCastExpr(Context, type, CK, userExpr);
}
// Make id<P...> cast into an 'id' cast.
else if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(userExpr)) {
if (CE->getType()->isObjCQualifiedIdType()) {
while ((CE = dyn_cast<CStyleCastExpr>(userExpr)))
userExpr = CE->getSubExpr();
CastKind CK;
if (userExpr->getType()->isIntegralType(*Context)) {
CK = CK_IntegralToPointer;
} else if (userExpr->getType()->isBlockPointerType()) {
CK = CK_BlockPointerToObjCPointerCast;
} else if (userExpr->getType()->isPointerType()) {
CK = CK_CPointerToObjCPointerCast;
} else {
CK = CK_BitCast;
}
userExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
CK, userExpr);
}
}
MsgExprs.push_back(userExpr);
// We've transferred the ownership to MsgExprs. For now, we *don't* null
// out the argument in the original expression (since we aren't deleting
// the ObjCMessageExpr). See RewritePropertyOrImplicitSetter() usage for more info.
//Exp->setArg(i, 0);
}
// Generate the funky cast.
CastExpr *cast;
SmallVector<QualType, 8> ArgTypes;
QualType returnType;
// Push 'id' and 'SEL', the 2 implicit arguments.
if (MsgSendFlavor == MsgSendSuperFunctionDecl)
ArgTypes.push_back(Context->getPointerType(getSuperStructType()));
else
ArgTypes.push_back(Context->getObjCIdType());
ArgTypes.push_back(Context->getObjCSelType());
if (ObjCMethodDecl *OMD = Exp->getMethodDecl()) {
// Push any user argument types.
for (const auto *PI : OMD->params()) {
QualType t = PI->getType()->isObjCQualifiedIdType()
? Context->getObjCIdType()
: PI->getType();
// Make sure we convert "t (^)(...)" to "t (*)(...)".
(void)convertBlockPointerToFunctionPointer(t);
ArgTypes.push_back(t);
}
returnType = Exp->getType();
convertToUnqualifiedObjCType(returnType);
(void)convertBlockPointerToFunctionPointer(returnType);
} else {
returnType = Context->getObjCIdType();
}
// Get the type, we will need to reference it in a couple spots.
QualType msgSendType = MsgSendFlavor->getType();
// Create a reference to the objc_msgSend() declaration.
DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType,
VK_LValue, SourceLocation());
  // Need to cast objc_msgSend to "void *" (to work around a GCC bandaid).
// If we don't do this cast, we get the following bizarre warning/note:
// xx.m:13: warning: function called through a non-compatible type
// xx.m:13: note: if this code is reached, the program will abort
cast = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(Context->VoidTy),
CK_BitCast, DRE);
// Now do the "normal" pointer to function cast.
// If we don't have a method decl, force a variadic cast.
const ObjCMethodDecl *MD = Exp->getMethodDecl();
QualType castType =
getSimpleFunctionType(returnType, ArgTypes, MD ? MD->isVariadic() : true);
castType = Context->getPointerType(castType);
cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
cast);
// Don't forget the parens to enforce the proper binding.
ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
const FunctionType *FT = msgSendType->getAs<FunctionType>();
CallExpr *CE = new (Context)
CallExpr(*Context, PE, MsgExprs, FT->getReturnType(), VK_RValue, EndLoc);
Stmt *ReplacingStmt = CE;
if (MsgSendStretFlavor) {
    // We have a method returning a struct/union. We must also generate a
    // call to objc_msgSend_stret and select between the two varieties at
    // run time, depending on the size of the method's return type.
Expr *STCE = SynthMsgSendStretCallExpr(MsgSendStretFlavor,
returnType,
ArgTypes, MsgExprs,
Exp->getMethodDecl());
ReplacingStmt = STCE;
}
// delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
return ReplacingStmt;
}
Stmt *RewriteModernObjC::RewriteMessageExpr(ObjCMessageExpr *Exp) {
Stmt *ReplacingStmt = SynthMessageExpr(Exp, Exp->getLocStart(),
Exp->getLocEnd());
// Now do the actual rewrite.
ReplaceStmt(Exp, ReplacingStmt);
// delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
return ReplacingStmt;
}
// typedef struct objc_object Protocol;
QualType RewriteModernObjC::getProtocolType() {
if (!ProtocolTypeDecl) {
TypeSourceInfo *TInfo
= Context->getTrivialTypeSourceInfo(Context->getObjCIdType());
ProtocolTypeDecl = TypedefDecl::Create(*Context, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get("Protocol"),
TInfo);
}
return Context->getTypeDeclType(ProtocolTypeDecl);
}
/// RewriteObjCProtocolExpr - Rewrite a protocol expression into
/// a synthesized/forward data reference (to the protocol's metadata).
/// The forward references (and metadata) are generated in
/// RewriteModernObjC::HandleTranslationUnit().
Stmt *RewriteModernObjC::RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp) {
std::string Name = "_OBJC_PROTOCOL_REFERENCE_$_" +
Exp->getProtocol()->getNameAsString();
IdentifierInfo *ID = &Context->Idents.get(Name);
VarDecl *VD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
SourceLocation(), ID, getProtocolType(),
nullptr, SC_Extern);
DeclRefExpr *DRE = new (Context) DeclRefExpr(VD, false, getProtocolType(),
VK_LValue, SourceLocation());
CastExpr *castExpr =
NoTypeInfoCStyleCastExpr(
Context, Context->getPointerType(DRE->getType()), CK_BitCast, DRE);
ReplaceStmt(Exp, castExpr);
ProtocolExprDecls.insert(Exp->getProtocol()->getCanonicalDecl());
// delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
return castExpr;
}
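// For illustration, '@protocol(Foo)' (a hypothetical protocol name) is
// rewritten roughly as '(Protocol *)_OBJC_PROTOCOL_REFERENCE_$_Foo', where
// the referenced metadata is emitted later in HandleTranslationUnit().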
bool RewriteModernObjC::BufferContainsPPDirectives(const char *startBuf,
const char *endBuf) {
while (startBuf < endBuf) {
if (*startBuf == '#') {
// Skip whitespace.
for (++startBuf; startBuf[0] == ' ' || startBuf[0] == '\t'; ++startBuf)
;
if (!strncmp(startBuf, "if", strlen("if")) ||
!strncmp(startBuf, "ifdef", strlen("ifdef")) ||
!strncmp(startBuf, "ifndef", strlen("ifndef")) ||
!strncmp(startBuf, "define", strlen("define")) ||
!strncmp(startBuf, "undef", strlen("undef")) ||
!strncmp(startBuf, "else", strlen("else")) ||
!strncmp(startBuf, "elif", strlen("elif")) ||
!strncmp(startBuf, "endif", strlen("endif")) ||
!strncmp(startBuf, "pragma", strlen("pragma")) ||
!strncmp(startBuf, "include", strlen("include")) ||
!strncmp(startBuf, "import", strlen("import")) ||
!strncmp(startBuf, "include_next", strlen("include_next")))
return true;
}
startBuf++;
}
return false;
}
/// IsTagDefinedInsideClass - This routine checks whether a named tag type
/// is defined inside an Objective-C class. If so, it returns true.
bool RewriteModernObjC::IsTagDefinedInsideClass(ObjCContainerDecl *IDecl,
TagDecl *Tag,
bool &IsNamedDefinition) {
if (!IDecl)
return false;
SourceLocation TagLocation;
if (RecordDecl *RD = dyn_cast<RecordDecl>(Tag)) {
RD = RD->getDefinition();
if (!RD || !RD->getDeclName().getAsIdentifierInfo())
return false;
IsNamedDefinition = true;
TagLocation = RD->getLocation();
return Context->getSourceManager().isBeforeInTranslationUnit(
IDecl->getLocation(), TagLocation);
}
if (EnumDecl *ED = dyn_cast<EnumDecl>(Tag)) {
if (!ED || !ED->getDeclName().getAsIdentifierInfo())
return false;
IsNamedDefinition = true;
TagLocation = ED->getLocation();
return Context->getSourceManager().isBeforeInTranslationUnit(
IDecl->getLocation(), TagLocation);
}
return false;
}
/// RewriteObjCFieldDeclType - This routine rewrites a type into the buffer.
/// It handles elaborated types, as well as enum types in the process.
bool RewriteModernObjC::RewriteObjCFieldDeclType(QualType &Type,
std::string &Result) {
if (isa<TypedefType>(Type)) {
Result += "\t";
return false;
}
if (Type->isArrayType()) {
QualType ElemTy = Context->getBaseElementType(Type);
return RewriteObjCFieldDeclType(ElemTy, Result);
}
else if (Type->isRecordType()) {
RecordDecl *RD = Type->getAs<RecordType>()->getDecl();
if (RD->isCompleteDefinition()) {
if (RD->isStruct())
Result += "\n\tstruct ";
else if (RD->isUnion())
Result += "\n\tunion ";
else
assert(false && "class not allowed as an ivar type");
Result += RD->getName();
if (GlobalDefinedTags.count(RD)) {
// struct/union is defined globally, use it.
Result += " ";
return true;
}
Result += " {\n";
for (auto *FD : RD->fields())
RewriteObjCFieldDecl(FD, Result);
Result += "\t} ";
return true;
}
}
else if (Type->isEnumeralType()) {
EnumDecl *ED = Type->getAs<EnumType>()->getDecl();
if (ED->isCompleteDefinition()) {
Result += "\n\tenum ";
Result += ED->getName();
if (GlobalDefinedTags.count(ED)) {
      // Enum is globally defined, use it.
Result += " ";
return true;
}
Result += " {\n";
for (const auto *EC : ED->enumerators()) {
Result += "\t"; Result += EC->getName(); Result += " = ";
llvm::APSInt Val = EC->getInitVal();
Result += Val.toString(10);
Result += ",\n";
}
Result += "\t} ";
return true;
}
}
Result += "\t";
convertObjCTypeToCStyleType(Type);
return false;
}
/// RewriteObjCFieldDecl - This routine rewrites a field into the buffer.
/// It handles elaborated types, as well as enum types in the process.
void RewriteModernObjC::RewriteObjCFieldDecl(FieldDecl *fieldDecl,
std::string &Result) {
QualType Type = fieldDecl->getType();
std::string Name = fieldDecl->getNameAsString();
  bool ElaboratedType = RewriteObjCFieldDeclType(Type, Result);
  if (!ElaboratedType)
Type.getAsStringInternal(Name, Context->getPrintingPolicy());
Result += Name;
if (fieldDecl->isBitField()) {
Result += " : "; Result += utostr(fieldDecl->getBitWidthValue(*Context));
}
  else if (ElaboratedType && Type->isArrayType()) {
const ArrayType *AT = Context->getAsArrayType(Type);
do {
if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) {
Result += "[";
llvm::APInt Dim = CAT->getSize();
Result += utostr(Dim.getZExtValue());
Result += "]";
}
AT = Context->getAsArrayType(AT->getElementType());
} while (AT);
}
Result += ";\n";
}
/// RewriteLocallyDefinedNamedAggregates - This routine rewrites locally defined
/// named aggregate types into the input buffer.
void RewriteModernObjC::RewriteLocallyDefinedNamedAggregates(FieldDecl *fieldDecl,
std::string &Result) {
QualType Type = fieldDecl->getType();
if (isa<TypedefType>(Type))
return;
if (Type->isArrayType())
Type = Context->getBaseElementType(Type);
ObjCContainerDecl *IDecl =
dyn_cast<ObjCContainerDecl>(fieldDecl->getDeclContext());
TagDecl *TD = nullptr;
if (Type->isRecordType()) {
TD = Type->getAs<RecordType>()->getDecl();
}
else if (Type->isEnumeralType()) {
TD = Type->getAs<EnumType>()->getDecl();
}
if (TD) {
if (GlobalDefinedTags.count(TD))
return;
bool IsNamedDefinition = false;
if (IsTagDefinedInsideClass(IDecl, TD, IsNamedDefinition)) {
RewriteObjCFieldDeclType(Type, Result);
Result += ";";
}
if (IsNamedDefinition)
GlobalDefinedTags.insert(TD);
}
}
unsigned RewriteModernObjC::ObjCIvarBitfieldGroupNo(ObjCIvarDecl *IV) {
const ObjCInterfaceDecl *CDecl = IV->getContainingInterface();
if (ObjCInterefaceHasBitfieldGroups.count(CDecl)) {
return IvarGroupNumber[IV];
}
unsigned GroupNo = 0;
SmallVector<const ObjCIvarDecl *, 8> IVars;
for (const ObjCIvarDecl *IVD = CDecl->all_declared_ivar_begin();
IVD; IVD = IVD->getNextIvar())
IVars.push_back(IVD);
for (unsigned i = 0, e = IVars.size(); i < e; i++)
if (IVars[i]->isBitField()) {
IvarGroupNumber[IVars[i++]] = ++GroupNo;
while (i < e && IVars[i]->isBitField())
IvarGroupNumber[IVars[i++]] = GroupNo;
if (i < e)
--i;
}
ObjCInterefaceHasBitfieldGroups.insert(CDecl);
return IvarGroupNumber[IV];
}
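// For illustration: given ivars 'int a : 1; int b : 2; int c; int d : 3;',
// the consecutive bitfields 'a' and 'b' share group number 1 and 'd' gets
// group number 2; 'c' is not a bitfield and belongs to no group.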
QualType RewriteModernObjC::SynthesizeBitfieldGroupStructType(
ObjCIvarDecl *IV,
SmallVectorImpl<ObjCIvarDecl *> &IVars) {
std::string StructTagName;
ObjCIvarBitfieldGroupType(IV, StructTagName);
RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct,
Context->getTranslationUnitDecl(),
SourceLocation(), SourceLocation(),
&Context->Idents.get(StructTagName));
for (unsigned i=0, e = IVars.size(); i < e; i++) {
ObjCIvarDecl *Ivar = IVars[i];
RD->addDecl(FieldDecl::Create(*Context, RD, SourceLocation(), SourceLocation(),
&Context->Idents.get(Ivar->getName()),
Ivar->getType(),
nullptr, /*Expr *BW */Ivar->getBitWidth(),
false, ICIS_NoInit));
}
RD->completeDefinition();
return Context->getTagDeclType(RD);
}
QualType RewriteModernObjC::GetGroupRecordTypeForObjCIvarBitfield(ObjCIvarDecl *IV) {
const ObjCInterfaceDecl *CDecl = IV->getContainingInterface();
unsigned GroupNo = ObjCIvarBitfieldGroupNo(IV);
std::pair<const ObjCInterfaceDecl*, unsigned> tuple = std::make_pair(CDecl, GroupNo);
if (GroupRecordType.count(tuple))
return GroupRecordType[tuple];
SmallVector<ObjCIvarDecl *, 8> IVars;
for (const ObjCIvarDecl *IVD = CDecl->all_declared_ivar_begin();
IVD; IVD = IVD->getNextIvar()) {
if (IVD->isBitField())
IVars.push_back(const_cast<ObjCIvarDecl *>(IVD));
else {
if (!IVars.empty()) {
unsigned GroupNo = ObjCIvarBitfieldGroupNo(IVars[0]);
// Generate the struct type for this group of bitfield ivars.
GroupRecordType[std::make_pair(CDecl, GroupNo)] =
SynthesizeBitfieldGroupStructType(IVars[0], IVars);
IVars.clear();
}
}
}
if (!IVars.empty()) {
// Do the last one.
unsigned GroupNo = ObjCIvarBitfieldGroupNo(IVars[0]);
GroupRecordType[std::make_pair(CDecl, GroupNo)] =
SynthesizeBitfieldGroupStructType(IVars[0], IVars);
}
QualType RetQT = GroupRecordType[tuple];
assert(!RetQT.isNull() && "GetGroupRecordTypeForObjCIvarBitfield struct type is NULL");
return RetQT;
}
/// ObjCIvarBitfieldGroupDecl - Names field decl. for ivar bitfield group.
/// Name would be: classname__GRBF_n where n is the group number for this ivar.
void RewriteModernObjC::ObjCIvarBitfieldGroupDecl(ObjCIvarDecl *IV,
std::string &Result) {
const ObjCInterfaceDecl *CDecl = IV->getContainingInterface();
Result += CDecl->getName();
Result += "__GRBF_";
unsigned GroupNo = ObjCIvarBitfieldGroupNo(IV);
Result += utostr(GroupNo);
return;
}
/// ObjCIvarBitfieldGroupType - Names struct type for ivar bitfield group.
/// Name of the struct would be: classname__T_n where n is the group number for
/// this ivar.
void RewriteModernObjC::ObjCIvarBitfieldGroupType(ObjCIvarDecl *IV,
std::string &Result) {
const ObjCInterfaceDecl *CDecl = IV->getContainingInterface();
Result += CDecl->getName();
Result += "__T_";
unsigned GroupNo = ObjCIvarBitfieldGroupNo(IV);
Result += utostr(GroupNo);
return;
}
/// ObjCIvarBitfieldGroupOffset - Names symbol for ivar bitfield group field offset.
/// Name would be: OBJC_IVAR_$_classname__GRBF_n where n is the group number for
/// this ivar.
void RewriteModernObjC::ObjCIvarBitfieldGroupOffset(ObjCIvarDecl *IV,
std::string &Result) {
Result += "OBJC_IVAR_$_";
ObjCIvarBitfieldGroupDecl(IV, Result);
}
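// For illustration, an ivar in bitfield group 2 of a hypothetical class
// 'Foo' uses the struct type name 'Foo__T_2', the field name 'Foo__GRBF_2',
// and the offset symbol 'OBJC_IVAR_$_Foo__GRBF_2'.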
#define SKIP_BITFIELDS(IX, ENDIX, VEC) { \
while ((IX < ENDIX) && VEC[IX]->isBitField()) \
++IX; \
if (IX < ENDIX) \
--IX; \
}
/// RewriteObjCInternalStruct - Rewrite one internal struct corresponding to
/// an objective-c class with ivars.
void RewriteModernObjC::RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl,
std::string &Result) {
  assert(CDecl && "Class missing in RewriteObjCInternalStruct");
  assert(CDecl->getName() != "" &&
         "Name missing in RewriteObjCInternalStruct");
ObjCInterfaceDecl *RCDecl = CDecl->getSuperClass();
SmallVector<ObjCIvarDecl *, 8> IVars;
for (ObjCIvarDecl *IVD = CDecl->all_declared_ivar_begin();
IVD; IVD = IVD->getNextIvar())
IVars.push_back(IVD);
SourceLocation LocStart = CDecl->getLocStart();
SourceLocation LocEnd = CDecl->getEndOfDefinitionLoc();
const char *startBuf = SM->getCharacterData(LocStart);
const char *endBuf = SM->getCharacterData(LocEnd);
  // If this class has no ivars and its root classes, directly or
  // indirectly, have no ivars either (and thus were not synthesized),
  // there is no need to synthesize this class.
if ((!CDecl->isThisDeclarationADefinition() || IVars.size() == 0) &&
(!RCDecl || !ObjCSynthesizedStructs.count(RCDecl))) {
endBuf += Lexer::MeasureTokenLength(LocEnd, *SM, LangOpts);
ReplaceText(LocStart, endBuf-startBuf, Result);
return;
}
  // Insert named struct/union definitions that appear inside the class
  // into the outer scope. This follows the semantics of locally defined
  // structs/unions in Objective-C classes.
for (unsigned i = 0, e = IVars.size(); i < e; i++)
RewriteLocallyDefinedNamedAggregates(IVars[i], Result);
  // Insert named structs which are synthesized to group ivar bitfields
  // into the outer scope as well.
for (unsigned i = 0, e = IVars.size(); i < e; i++)
if (IVars[i]->isBitField()) {
ObjCIvarDecl *IV = IVars[i];
QualType QT = GetGroupRecordTypeForObjCIvarBitfield(IV);
RewriteObjCFieldDeclType(QT, Result);
Result += ";";
// skip over ivar bitfields in this group.
      SKIP_BITFIELDS(i, e, IVars);
}
Result += "\nstruct ";
Result += CDecl->getNameAsString();
Result += "_IMPL {\n";
if (RCDecl && ObjCSynthesizedStructs.count(RCDecl)) {
Result += "\tstruct "; Result += RCDecl->getNameAsString();
Result += "_IMPL "; Result += RCDecl->getNameAsString();
Result += "_IVARS;\n";
}
for (unsigned i = 0, e = IVars.size(); i < e; i++) {
if (IVars[i]->isBitField()) {
ObjCIvarDecl *IV = IVars[i];
Result += "\tstruct ";
ObjCIvarBitfieldGroupType(IV, Result); Result += " ";
ObjCIvarBitfieldGroupDecl(IV, Result); Result += ";\n";
// skip over ivar bitfields in this group.
      SKIP_BITFIELDS(i, e, IVars);
}
else
RewriteObjCFieldDecl(IVars[i], Result);
}
Result += "};\n";
endBuf += Lexer::MeasureTokenLength(LocEnd, *SM, LangOpts);
ReplaceText(LocStart, endBuf-startBuf, Result);
// Mark this struct as having been generated.
if (!ObjCSynthesizedStructs.insert(CDecl).second)
llvm_unreachable("struct already synthesize- RewriteObjCInternalStruct");
}
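// For illustration (hypothetical names): an interface 'Child' derived from
// 'Parent' (whose struct has been synthesized) and declaring 'int x;' is
// rewritten roughly as:
//   struct Child_IMPL {
//     struct Parent_IMPL Parent_IVARS;
//     int x;
//   };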
/// RewriteIvarOffsetSymbols - Rewrite ivar offset symbols of those ivars which
/// have been referenced in an ivar access expression.
void RewriteModernObjC::RewriteIvarOffsetSymbols(ObjCInterfaceDecl *CDecl,
std::string &Result) {
// write out ivar offset symbols which have been referenced in an ivar
// access expression.
llvm::SmallPtrSet<ObjCIvarDecl *, 8> Ivars = ReferencedIvars[CDecl];
if (Ivars.empty())
return;
llvm::DenseSet<std::pair<const ObjCInterfaceDecl*, unsigned> > GroupSymbolOutput;
for (ObjCIvarDecl *IvarDecl : Ivars) {
const ObjCInterfaceDecl *IDecl = IvarDecl->getContainingInterface();
unsigned GroupNo = 0;
if (IvarDecl->isBitField()) {
GroupNo = ObjCIvarBitfieldGroupNo(IvarDecl);
if (GroupSymbolOutput.count(std::make_pair(IDecl, GroupNo)))
continue;
}
Result += "\n";
if (LangOpts.MicrosoftExt)
Result += "__declspec(allocate(\".objc_ivar$B\")) ";
Result += "extern \"C\" ";
if (LangOpts.MicrosoftExt &&
IvarDecl->getAccessControl() != ObjCIvarDecl::Private &&
IvarDecl->getAccessControl() != ObjCIvarDecl::Package)
Result += "__declspec(dllimport) ";
Result += "unsigned long ";
if (IvarDecl->isBitField()) {
ObjCIvarBitfieldGroupOffset(IvarDecl, Result);
GroupSymbolOutput.insert(std::make_pair(IDecl, GroupNo));
}
else
WriteInternalIvarName(CDecl, IvarDecl, Result);
Result += ";";
}
}
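// For illustration, each referenced non-bitfield ivar typically yields a
// line of the form 'extern "C" unsigned long <offset symbol>;' (e.g.
// 'OBJC_IVAR_$_Foo$x' per WriteInternalIvarName), with the __declspec
// attributes above prepended under the Microsoft extensions.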
//===----------------------------------------------------------------------===//
// Meta Data Emission
//===----------------------------------------------------------------------===//
/// RewriteImplementations - This routine rewrites all method implementations
/// and emits meta-data.
void RewriteModernObjC::RewriteImplementations() {
int ClsDefCount = ClassImplementation.size();
int CatDefCount = CategoryImplementation.size();
// Rewrite implemented methods
for (int i = 0; i < ClsDefCount; i++) {
ObjCImplementationDecl *OIMP = ClassImplementation[i];
ObjCInterfaceDecl *CDecl = OIMP->getClassInterface();
if (CDecl->isImplicitInterfaceDecl())
assert(false &&
"Legacy implicit interface rewriting not supported in moder abi");
RewriteImplementationDecl(OIMP);
}
for (int i = 0; i < CatDefCount; i++) {
ObjCCategoryImplDecl *CIMP = CategoryImplementation[i];
ObjCInterfaceDecl *CDecl = CIMP->getClassInterface();
if (CDecl->isImplicitInterfaceDecl())
assert(false &&
"Legacy implicit interface rewriting not supported in moder abi");
RewriteImplementationDecl(CIMP);
}
}
void RewriteModernObjC::RewriteByRefString(std::string &ResultStr,
const std::string &Name,
ValueDecl *VD, bool def) {
assert(BlockByRefDeclNo.count(VD) &&
"RewriteByRefString: ByRef decl missing");
if (def)
ResultStr += "struct ";
ResultStr += "__Block_byref_" + Name +
"_" + utostr(BlockByRefDeclNo[VD]) ;
}
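// For illustration, a __block variable 'x' whose decl number is 0 yields
// the name '__Block_byref_x_0' (prefixed with 'struct ' when 'def' is
// true).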
static bool HasLocalVariableExternalStorage(ValueDecl *VD) {
if (VarDecl *Var = dyn_cast<VarDecl>(VD))
return (Var->isFunctionOrMethodVarDecl() && !Var->hasLocalStorage());
return false;
}
std::string RewriteModernObjC::SynthesizeBlockFunc(BlockExpr *CE, int i,
StringRef funcName,
std::string Tag) {
const FunctionType *AFT = CE->getFunctionType();
QualType RT = AFT->getReturnType();
std::string StructRef = "struct " + Tag;
SourceLocation BlockLoc = CE->getExprLoc();
std::string S;
ConvertSourceLocationToLineDirective(BlockLoc, S);
S += "static " + RT.getAsString(Context->getPrintingPolicy()) + " __" +
funcName.str() + "_block_func_" + utostr(i);
BlockDecl *BD = CE->getBlockDecl();
if (isa<FunctionNoProtoType>(AFT)) {
// No user-supplied arguments. Still need to pass in a pointer to the
// block (to reference imported block decl refs).
S += "(" + StructRef + " *__cself)";
} else if (BD->param_empty()) {
S += "(" + StructRef + " *__cself)";
} else {
const FunctionProtoType *FT = cast<FunctionProtoType>(AFT);
assert(FT && "SynthesizeBlockFunc: No function proto");
S += '(';
// first add the implicit argument.
S += StructRef + " *__cself, ";
std::string ParamStr;
for (BlockDecl::param_iterator AI = BD->param_begin(),
E = BD->param_end(); AI != E; ++AI) {
if (AI != BD->param_begin()) S += ", ";
ParamStr = (*AI)->getNameAsString();
QualType QT = (*AI)->getType();
(void)convertBlockPointerToFunctionPointer(QT);
QT.getAsStringInternal(ParamStr, Context->getPrintingPolicy());
S += ParamStr;
}
if (FT->isVariadic()) {
if (!BD->param_empty()) S += ", ";
S += "...";
}
S += ')';
}
S += " {\n";
// Create local declarations to avoid rewriting all closure decl ref exprs.
// First, emit a declaration for all "by ref" decls.
for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByRefDecls.begin(),
E = BlockByRefDecls.end(); I != E; ++I) {
S += " ";
std::string Name = (*I)->getNameAsString();
std::string TypeString;
RewriteByRefString(TypeString, Name, (*I));
TypeString += " *";
Name = TypeString + Name;
S += Name + " = __cself->" + (*I)->getNameAsString() + "; // bound by ref\n";
}
// Next, emit a declaration for all "by copy" declarations.
for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByCopyDecls.begin(),
E = BlockByCopyDecls.end(); I != E; ++I) {
S += " ";
// Handle nested closure invocation. For example:
//
// void (^myImportedClosure)(void);
// myImportedClosure = ^(void) { setGlobalInt(x + y); };
//
// void (^anotherClosure)(void);
// anotherClosure = ^(void) {
// myImportedClosure(); // import and invoke the closure
// };
//
if (isTopLevelBlockPointerType((*I)->getType())) {
RewriteBlockPointerTypeVariable(S, (*I));
S += " = (";
RewriteBlockPointerType(S, (*I)->getType());
S += ")";
S += "__cself->" + (*I)->getNameAsString() + "; // bound by copy\n";
}
else {
std::string Name = (*I)->getNameAsString();
QualType QT = (*I)->getType();
if (HasLocalVariableExternalStorage(*I))
QT = Context->getPointerType(QT);
QT.getAsStringInternal(Name, Context->getPrintingPolicy());
S += Name + " = __cself->" +
(*I)->getNameAsString() + "; // bound by copy\n";
}
}
std::string RewrittenStr = RewrittenBlockExprs[CE];
const char *cstr = RewrittenStr.c_str();
while (*cstr++ != '{') ;
S += cstr;
S += "\n";
return S;
}
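// For illustration (assuming a containing function 'foo' and impl tag
// '__foo_block_impl_0'), the block '^ int (int j) { return j + i; }' that
// captures 'i' by copy is synthesized roughly as:
//   static int __foo_block_func_0(struct __foo_block_impl_0 *__cself,
//                                 int j) {
//     int i = __cself->i; // bound by copy
//     return j + i;
//   }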
std::string RewriteModernObjC::SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
StringRef funcName,
std::string Tag) {
std::string StructRef = "struct " + Tag;
std::string S = "static void __";
S += funcName;
S += "_block_copy_" + utostr(i);
S += "(" + StructRef;
S += "*dst, " + StructRef;
S += "*src) {";
for (ValueDecl *VD : ImportedBlockDecls) {
S += "_Block_object_assign((void*)&dst->";
S += VD->getNameAsString();
S += ", (void*)src->";
S += VD->getNameAsString();
if (BlockByRefDeclsPtrSet.count(VD))
S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);";
else if (VD->getType()->isBlockPointerType())
S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);";
else
S += ", " + utostr(BLOCK_FIELD_IS_OBJECT) + "/*BLOCK_FIELD_IS_OBJECT*/);";
}
S += "}\n";
S += "\nstatic void __";
S += funcName;
S += "_block_dispose_" + utostr(i);
S += "(" + StructRef;
S += "*src) {";
for (ValueDecl *VD : ImportedBlockDecls) {
S += "_Block_object_dispose((void*)src->";
S += VD->getNameAsString();
if (BlockByRefDeclsPtrSet.count(VD))
S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);";
else if (VD->getType()->isBlockPointerType())
S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);";
else
S += ", " + utostr(BLOCK_FIELD_IS_OBJECT) + "/*BLOCK_FIELD_IS_OBJECT*/);";
}
S += "}\n";
return S;
}
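// For illustration (hypothetical names, one imported object 'obj'), the
// synthesized helpers look roughly like:
//   static void __foo_block_copy_0(struct __foo_block_impl_0*dst,
//                                  struct __foo_block_impl_0*src) {
//     _Block_object_assign((void*)&dst->obj, (void*)src->obj,
//                          3/*BLOCK_FIELD_IS_OBJECT*/);
//   }
//   static void __foo_block_dispose_0(struct __foo_block_impl_0*src) {
//     _Block_object_dispose((void*)src->obj, 3/*BLOCK_FIELD_IS_OBJECT*/);
//   }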
std::string RewriteModernObjC::SynthesizeBlockImpl(BlockExpr *CE, std::string Tag,
std::string Desc) {
std::string S = "\nstruct " + Tag;
std::string Constructor = " " + Tag;
S += " {\n struct __block_impl impl;\n";
S += " struct " + Desc;
S += "* Desc;\n";
Constructor += "(void *fp, "; // Invoke function pointer.
Constructor += "struct " + Desc; // Descriptor pointer.
Constructor += " *desc";
if (BlockDeclRefs.size()) {
// Output all "by copy" declarations.
for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByCopyDecls.begin(),
E = BlockByCopyDecls.end(); I != E; ++I) {
S += " ";
std::string FieldName = (*I)->getNameAsString();
std::string ArgName = "_" + FieldName;
// Handle nested closure invocation. For example:
//
// void (^myImportedBlock)(void);
// myImportedBlock = ^(void) { setGlobalInt(x + y); };
//
// void (^anotherBlock)(void);
// anotherBlock = ^(void) {
// myImportedBlock(); // import and invoke the closure
// };
//
if (isTopLevelBlockPointerType((*I)->getType())) {
S += "struct __block_impl *";
Constructor += ", void *" + ArgName;
} else {
QualType QT = (*I)->getType();
if (HasLocalVariableExternalStorage(*I))
QT = Context->getPointerType(QT);
QT.getAsStringInternal(FieldName, Context->getPrintingPolicy());
QT.getAsStringInternal(ArgName, Context->getPrintingPolicy());
Constructor += ", " + ArgName;
}
S += FieldName + ";\n";
}
// Output all "by ref" declarations.
for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByRefDecls.begin(),
E = BlockByRefDecls.end(); I != E; ++I) {
S += " ";
std::string FieldName = (*I)->getNameAsString();
std::string ArgName = "_" + FieldName;
{
std::string TypeString;
RewriteByRefString(TypeString, FieldName, (*I));
TypeString += " *";
FieldName = TypeString + FieldName;
ArgName = TypeString + ArgName;
Constructor += ", " + ArgName;
}
S += FieldName + "; // by ref\n";
}
// Finish writing the constructor.
Constructor += ", int flags=0)";
// Initialize all "by copy" arguments.
    bool firstTime = true;
for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByCopyDecls.begin(),
E = BlockByCopyDecls.end(); I != E; ++I) {
std::string Name = (*I)->getNameAsString();
      if (firstTime) {
Constructor += " : ";
        firstTime = false;
}
else
Constructor += ", ";
if (isTopLevelBlockPointerType((*I)->getType()))
Constructor += Name + "((struct __block_impl *)_" + Name + ")";
else
Constructor += Name + "(_" + Name + ")";
}
// Initialize all "by ref" arguments.
for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByRefDecls.begin(),
E = BlockByRefDecls.end(); I != E; ++I) {
std::string Name = (*I)->getNameAsString();
      if (firstTime) {
Constructor += " : ";
        firstTime = false;
}
else
Constructor += ", ";
Constructor += Name + "(_" + Name + "->__forwarding)";
}
Constructor += " {\n";
if (GlobalVarDecl)
Constructor += " impl.isa = &_NSConcreteGlobalBlock;\n";
else
Constructor += " impl.isa = &_NSConcreteStackBlock;\n";
Constructor += " impl.Flags = flags;\n impl.FuncPtr = fp;\n";
Constructor += " Desc = desc;\n";
} else {
// Finish writing the constructor.
Constructor += ", int flags=0) {\n";
if (GlobalVarDecl)
Constructor += " impl.isa = &_NSConcreteGlobalBlock;\n";
else
Constructor += " impl.isa = &_NSConcreteStackBlock;\n";
Constructor += " impl.Flags = flags;\n impl.FuncPtr = fp;\n";
Constructor += " Desc = desc;\n";
}
Constructor += " ";
Constructor += "}\n";
S += Constructor;
S += "};\n";
return S;
}
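// For illustration (hypothetical names), a block in function 'foo' that
// captures 'int i' by copy gets roughly the following synthesized struct:
//   struct __foo_block_impl_0 {
//     struct __block_impl impl;
//     struct __foo_block_desc_0* Desc;
//     int i;
//     __foo_block_impl_0(void *fp, struct __foo_block_desc_0 *desc, int _i,
//                        int flags=0) : i(_i) {
//       impl.isa = &_NSConcreteStackBlock;
//       impl.Flags = flags;
//       impl.FuncPtr = fp;
//       Desc = desc;
//     }
//   };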
std::string RewriteModernObjC::SynthesizeBlockDescriptor(std::string DescTag,
std::string ImplTag, int i,
StringRef FunName,
unsigned hasCopy) {
std::string S = "\nstatic struct " + DescTag;
S += " {\n size_t reserved;\n";
S += " size_t Block_size;\n";
if (hasCopy) {
S += " void (*copy)(struct ";
S += ImplTag; S += "*, struct ";
S += ImplTag; S += "*);\n";
S += " void (*dispose)(struct ";
S += ImplTag; S += "*);\n";
}
S += "} ";
S += DescTag + "_DATA = { 0, sizeof(struct ";
S += ImplTag + ")";
if (hasCopy) {
S += ", __" + FunName.str() + "_block_copy_" + utostr(i);
S += ", __" + FunName.str() + "_block_dispose_" + utostr(i);
}
S += "};\n";
return S;
}
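// For illustration (hypothetical tags, no copy/dispose helpers), this
// yields roughly:
//   static struct __foo_block_desc_0 {
//     size_t reserved;
//     size_t Block_size;
//   } __foo_block_desc_0_DATA = { 0, sizeof(struct __foo_block_impl_0)};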
void RewriteModernObjC::SynthesizeBlockLiterals(SourceLocation FunLocStart,
StringRef FunName) {
bool RewriteSC = (GlobalVarDecl &&
!Blocks.empty() &&
GlobalVarDecl->getStorageClass() == SC_Static &&
GlobalVarDecl->getType().getCVRQualifiers());
if (RewriteSC) {
std::string SC(" void __");
SC += GlobalVarDecl->getNameAsString();
SC += "() {}";
InsertText(FunLocStart, SC);
}
// Insert closures that were part of the function.
for (unsigned i = 0, count=0; i < Blocks.size(); i++) {
CollectBlockDeclRefInfo(Blocks[i]);
// Need to copy-in the inner copied-in variables not actually used in this
// block.
for (int j = 0; j < InnerDeclRefsCount[i]; j++) {
DeclRefExpr *Exp = InnerDeclRefs[count++];
ValueDecl *VD = Exp->getDecl();
BlockDeclRefs.push_back(Exp);
if (!VD->hasAttr<BlocksAttr>()) {
if (!BlockByCopyDeclsPtrSet.count(VD)) {
BlockByCopyDeclsPtrSet.insert(VD);
BlockByCopyDecls.push_back(VD);
}
continue;
}
if (!BlockByRefDeclsPtrSet.count(VD)) {
BlockByRefDeclsPtrSet.insert(VD);
BlockByRefDecls.push_back(VD);
}
// imported objects in the inner blocks not used in the outer
// blocks must be copied/disposed in the outer block as well.
if (VD->getType()->isObjCObjectPointerType() ||
VD->getType()->isBlockPointerType())
ImportedBlockDecls.insert(VD);
}
std::string ImplTag = "__" + FunName.str() + "_block_impl_" + utostr(i);
std::string DescTag = "__" + FunName.str() + "_block_desc_" + utostr(i);
std::string CI = SynthesizeBlockImpl(Blocks[i], ImplTag, DescTag);
InsertText(FunLocStart, CI);
std::string CF = SynthesizeBlockFunc(Blocks[i], i, FunName, ImplTag);
InsertText(FunLocStart, CF);
if (ImportedBlockDecls.size()) {
std::string HF = SynthesizeBlockHelperFuncs(Blocks[i], i, FunName, ImplTag);
InsertText(FunLocStart, HF);
}
std::string BD = SynthesizeBlockDescriptor(DescTag, ImplTag, i, FunName,
ImportedBlockDecls.size() > 0);
InsertText(FunLocStart, BD);
BlockDeclRefs.clear();
BlockByRefDecls.clear();
BlockByRefDeclsPtrSet.clear();
BlockByCopyDecls.clear();
BlockByCopyDeclsPtrSet.clear();
ImportedBlockDecls.clear();
}
if (RewriteSC) {
    // Must insert any 'const/volatile/static' here, since it has been
    // removed as a result of rewriting the block literals.
std::string SC;
if (GlobalVarDecl->getStorageClass() == SC_Static)
SC = "static ";
if (GlobalVarDecl->getType().isConstQualified())
SC += "const ";
if (GlobalVarDecl->getType().isVolatileQualified())
SC += "volatile ";
if (GlobalVarDecl->getType().isRestrictQualified())
SC += "restrict ";
InsertText(FunLocStart, SC);
}
if (GlobalConstructionExp) {
    // Extra fancy dance for a global block literal expression; it is
    // always the latest block expression on the block stack.
std::string Tag = "__";
Tag += FunName;
Tag += "_block_impl_";
Tag += utostr(Blocks.size()-1);
std::string globalBuf = "static ";
globalBuf += Tag; globalBuf += " ";
std::string SStr;
llvm::raw_string_ostream constructorExprBuf(SStr);
GlobalConstructionExp->printPretty(constructorExprBuf, nullptr,
PrintingPolicy(LangOpts));
globalBuf += constructorExprBuf.str();
globalBuf += ";\n";
InsertText(FunLocStart, globalBuf);
GlobalConstructionExp = nullptr;
}
Blocks.clear();
InnerDeclRefsCount.clear();
InnerDeclRefs.clear();
RewrittenBlockExprs.clear();
}
void RewriteModernObjC::InsertBlockLiteralsWithinFunction(FunctionDecl *FD) {
SourceLocation FunLocStart =
(!Blocks.empty()) ? getFunctionSourceLocation(*this, FD)
: FD->getTypeSpecStartLoc();
StringRef FuncName = FD->getName();
SynthesizeBlockLiterals(FunLocStart, FuncName);
}
static void BuildUniqueMethodName(std::string &Name,
ObjCMethodDecl *MD) {
ObjCInterfaceDecl *IFace = MD->getClassInterface();
Name = IFace->getName();
Name += "__" + MD->getSelector().getAsString();
// Convert colons to underscores.
std::string::size_type loc = 0;
while ((loc = Name.find(":", loc)) != std::string::npos)
Name.replace(loc, 1, "_");
}
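// For illustration, a hypothetical method '-[Foo setX:y:]' yields the
// unique name 'Foo__setX_y_'.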
void RewriteModernObjC::InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD) {
SourceLocation FunLocStart = MD->getLocStart();
std::string FuncName;
BuildUniqueMethodName(FuncName, MD);
SynthesizeBlockLiterals(FunLocStart, FuncName);
}
void RewriteModernObjC::GetBlockDeclRefExprs(Stmt *S) {
for (Stmt *SubStmt : S->children())
if (SubStmt) {
if (BlockExpr *CBE = dyn_cast<BlockExpr>(SubStmt))
GetBlockDeclRefExprs(CBE->getBody());
else
GetBlockDeclRefExprs(SubStmt);
}
// Handle specific things.
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S))
if (DRE->refersToEnclosingVariableOrCapture() ||
HasLocalVariableExternalStorage(DRE->getDecl()))
// FIXME: Handle enums.
BlockDeclRefs.push_back(DRE);
return;
}
void RewriteModernObjC::GetInnerBlockDeclRefExprs(Stmt *S,
SmallVectorImpl<DeclRefExpr *> &InnerBlockDeclRefs,
llvm::SmallPtrSetImpl<const DeclContext *> &InnerContexts) {
for (Stmt *SubStmt : S->children())
if (SubStmt) {
if (BlockExpr *CBE = dyn_cast<BlockExpr>(SubStmt)) {
InnerContexts.insert(cast<DeclContext>(CBE->getBlockDecl()));
GetInnerBlockDeclRefExprs(CBE->getBody(),
InnerBlockDeclRefs,
InnerContexts);
}
else
GetInnerBlockDeclRefExprs(SubStmt, InnerBlockDeclRefs, InnerContexts);
}
// Handle specific things.
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S)) {
if (DRE->refersToEnclosingVariableOrCapture() ||
HasLocalVariableExternalStorage(DRE->getDecl())) {
if (!InnerContexts.count(DRE->getDecl()->getDeclContext()))
InnerBlockDeclRefs.push_back(DRE);
if (VarDecl *Var = cast<VarDecl>(DRE->getDecl()))
if (Var->isFunctionOrMethodVarDecl())
ImportedLocalExternalDecls.insert(Var);
}
}
return;
}
/// convertObjCTypeToCStyleType - This routine converts Objective-C types,
/// such as qualified objects and blocks, to the closest C/C++ types it can.
/// It returns true if the input type was modified.
bool RewriteModernObjC::convertObjCTypeToCStyleType(QualType &T) {
QualType oldT = T;
convertBlockPointerToFunctionPointer(T);
if (T->isFunctionPointerType()) {
QualType PointeeTy;
if (const PointerType* PT = T->getAs<PointerType>()) {
PointeeTy = PT->getPointeeType();
if (const FunctionType *FT = PointeeTy->getAs<FunctionType>()) {
T = convertFunctionTypeOfBlocks(FT);
T = Context->getPointerType(T);
}
}
}
convertToUnqualifiedObjCType(T);
return T != oldT;
}
/// convertFunctionTypeOfBlocks - This routine converts a function type
/// whose result type may be a block pointer or whose argument type(s)
/// might be block pointers to an equivalent function type replacing
/// all block pointers with function pointers.
QualType RewriteModernObjC::convertFunctionTypeOfBlocks(const FunctionType *FT) {
const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FT);
// FTP will be null for closures that don't take arguments.
// Generate a funky cast.
SmallVector<QualType, 8> ArgTypes;
QualType Res = FT->getReturnType();
bool modified = convertObjCTypeToCStyleType(Res);
if (FTP) {
for (auto &I : FTP->param_types()) {
QualType t = I;
// Make sure we convert "t (^)(...)" to "t (*)(...)".
if (convertObjCTypeToCStyleType(t))
modified = true;
ArgTypes.push_back(t);
}
}
QualType FuncType;
if (modified)
FuncType = getSimpleFunctionType(Res, ArgTypes);
else FuncType = QualType(FT, 0);
return FuncType;
}
Stmt *RewriteModernObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) {
// Navigate to relevant type information.
const BlockPointerType *CPT = nullptr;
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BlockExp)) {
CPT = DRE->getType()->getAs<BlockPointerType>();
} else if (const MemberExpr *MExpr = dyn_cast<MemberExpr>(BlockExp)) {
CPT = MExpr->getType()->getAs<BlockPointerType>();
}
else if (const ParenExpr *PRE = dyn_cast<ParenExpr>(BlockExp)) {
return SynthesizeBlockCall(Exp, PRE->getSubExpr());
}
else if (const ImplicitCastExpr *IEXPR = dyn_cast<ImplicitCastExpr>(BlockExp))
CPT = IEXPR->getType()->getAs<BlockPointerType>();
else if (const ConditionalOperator *CEXPR =
dyn_cast<ConditionalOperator>(BlockExp)) {
Expr *LHSExp = CEXPR->getLHS();
Stmt *LHSStmt = SynthesizeBlockCall(Exp, LHSExp);
Expr *RHSExp = CEXPR->getRHS();
Stmt *RHSStmt = SynthesizeBlockCall(Exp, RHSExp);
Expr *CONDExp = CEXPR->getCond();
ConditionalOperator *CondExpr =
new (Context) ConditionalOperator(CONDExp,
SourceLocation(), cast<Expr>(LHSStmt),
SourceLocation(), cast<Expr>(RHSStmt),
Exp->getType(), VK_RValue, OK_Ordinary);
return CondExpr;
} else if (const ObjCIvarRefExpr *IRE = dyn_cast<ObjCIvarRefExpr>(BlockExp)) {
CPT = IRE->getType()->getAs<BlockPointerType>();
} else if (const PseudoObjectExpr *POE
= dyn_cast<PseudoObjectExpr>(BlockExp)) {
CPT = POE->getType()->castAs<BlockPointerType>();
} else {
    assert(false && "RewriteBlockClass: Bad type");
}
assert(CPT && "RewriteBlockClass: Bad type");
const FunctionType *FT = CPT->getPointeeType()->getAs<FunctionType>();
assert(FT && "RewriteBlockClass: Bad type");
const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FT);
// FTP will be null for closures that don't take arguments.
RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get("__block_impl"));
QualType PtrBlock = Context->getPointerType(Context->getTagDeclType(RD));
// Generate a funky cast.
SmallVector<QualType, 8> ArgTypes;
// Push the block argument type.
ArgTypes.push_back(PtrBlock);
if (FTP) {
for (auto &I : FTP->param_types()) {
QualType t = I;
// Make sure we convert "t (^)(...)" to "t (*)(...)".
if (!convertBlockPointerToFunctionPointer(t))
convertToUnqualifiedObjCType(t);
ArgTypes.push_back(t);
}
}
// Now do the pointer to function cast.
QualType PtrToFuncCastType = getSimpleFunctionType(Exp->getType(), ArgTypes);
PtrToFuncCastType = Context->getPointerType(PtrToFuncCastType);
CastExpr *BlkCast = NoTypeInfoCStyleCastExpr(Context, PtrBlock,
CK_BitCast,
const_cast<Expr*>(BlockExp));
// Don't forget the parens to enforce the proper binding.
ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
BlkCast);
//PE->dump();
FieldDecl *FD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
SourceLocation(),
&Context->Idents.get("FuncPtr"),
Context->VoidPtrTy, nullptr,
/*BitWidth=*/nullptr, /*Mutable=*/true,
ICIS_NoInit);
MemberExpr *ME =
new (Context) MemberExpr(PE, true, SourceLocation(), FD, SourceLocation(),
FD->getType(), VK_LValue, OK_Ordinary);
CastExpr *FunkCast = NoTypeInfoCStyleCastExpr(Context, PtrToFuncCastType,
CK_BitCast, ME);
PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), FunkCast);
SmallVector<Expr*, 8> BlkExprs;
// Add the implicit argument.
BlkExprs.push_back(BlkCast);
// Add the user arguments.
for (CallExpr::arg_iterator I = Exp->arg_begin(),
E = Exp->arg_end(); I != E; ++I) {
BlkExprs.push_back(*I);
}
CallExpr *CE = new (Context) CallExpr(*Context, PE, BlkExprs,
Exp->getType(), VK_RValue,
SourceLocation());
return CE;
}
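// For illustration, a call 'blk(5)' where 'blk' (a hypothetical variable)
// has type 'int (^)(int)' is rewritten roughly as:
//   ((int (*)(struct __block_impl *, int))
//        ((struct __block_impl *)blk)->FuncPtr)((struct __block_impl *)blk, 5)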
// We need to return the rewritten expression to handle cases where the
// DeclRefExpr is embedded in another expression being rewritten.
// For example:
//
// int main() {
// __block Foo *f;
// __block int i;
//
// void (^myblock)() = ^() {
// [f test]; // f is a DeclRefExpr embedded in a message (which is being rewritten).
// i = 77;
// };
//}
Stmt *RewriteModernObjC::RewriteBlockDeclRefExpr(DeclRefExpr *DeclRefExp) {
// Rewrite the byref variable into BYREFVAR->__forwarding->BYREFVAR
// for each DeclRefExp where BYREFVAR is name of the variable.
ValueDecl *VD = DeclRefExp->getDecl();
bool isArrow = DeclRefExp->refersToEnclosingVariableOrCapture() ||
HasLocalVariableExternalStorage(DeclRefExp->getDecl());
FieldDecl *FD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
SourceLocation(),
&Context->Idents.get("__forwarding"),
Context->VoidPtrTy, nullptr,
/*BitWidth=*/nullptr, /*Mutable=*/true,
ICIS_NoInit);
MemberExpr *ME = new (Context)
MemberExpr(DeclRefExp, isArrow, SourceLocation(), FD, SourceLocation(),
FD->getType(), VK_LValue, OK_Ordinary);
StringRef Name = VD->getName();
FD = FieldDecl::Create(*Context, nullptr, SourceLocation(), SourceLocation(),
&Context->Idents.get(Name),
Context->VoidPtrTy, nullptr,
/*BitWidth=*/nullptr, /*Mutable=*/true,
ICIS_NoInit);
ME =
new (Context) MemberExpr(ME, true, SourceLocation(), FD, SourceLocation(),
DeclRefExp->getType(), VK_LValue, OK_Ordinary);
// Need parens to enforce precedence.
ParenExpr *PE = new (Context) ParenExpr(DeclRefExp->getExprLoc(),
DeclRefExp->getExprLoc(),
ME);
ReplaceStmt(DeclRefExp, PE);
return PE;
}
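// For illustration, a reference to a __block variable 'i' captured inside
// a block is rewritten roughly as '(i->__forwarding->i)'.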
// Rewrites the imported local variable V with external storage
// (static, extern, etc.) as *V
//
Stmt *RewriteModernObjC::RewriteLocalVariableExternalStorage(DeclRefExpr *DRE) {
ValueDecl *VD = DRE->getDecl();
if (VarDecl *Var = dyn_cast<VarDecl>(VD))
if (!ImportedLocalExternalDecls.count(Var))
return DRE;
Expr *Exp = new (Context) UnaryOperator(DRE, UO_Deref, DRE->getType(),
VK_LValue, OK_Ordinary,
DRE->getLocation());
// Need parens to enforce precedence.
ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
Exp);
ReplaceStmt(DRE, PE);
return PE;
}
void RewriteModernObjC::RewriteCastExpr(CStyleCastExpr *CE) {
SourceLocation LocStart = CE->getLParenLoc();
SourceLocation LocEnd = CE->getRParenLoc();
// Need to avoid trying to rewrite synthesized casts.
if (LocStart.isInvalid())
return;
// Need to avoid trying to rewrite casts contained in macros.
if (!Rewriter::isRewritable(LocStart) || !Rewriter::isRewritable(LocEnd))
return;
const char *startBuf = SM->getCharacterData(LocStart);
const char *endBuf = SM->getCharacterData(LocEnd);
QualType QT = CE->getType();
const Type* TypePtr = QT->getAs<Type>();
if (isa<TypeOfExprType>(TypePtr)) {
const TypeOfExprType *TypeOfExprTypePtr = cast<TypeOfExprType>(TypePtr);
QT = TypeOfExprTypePtr->getUnderlyingExpr()->getType();
std::string TypeAsString = "(";
RewriteBlockPointerType(TypeAsString, QT);
TypeAsString += ")";
ReplaceText(LocStart, endBuf-startBuf+1, TypeAsString);
return;
}
// advance the location to startArgList.
const char *argPtr = startBuf;
while (*argPtr++ && (argPtr < endBuf)) {
switch (*argPtr) {
case '^':
// Replace the '^' with '*'.
LocStart = LocStart.getLocWithOffset(argPtr-startBuf);
ReplaceText(LocStart, 1, "*");
break;
}
}
return;
}
void RewriteModernObjC::RewriteImplicitCastObjCExpr(CastExpr *IC) {
CastKind CastKind = IC->getCastKind();
if (CastKind != CK_BlockPointerToObjCPointerCast &&
CastKind != CK_AnyPointerToBlockPointerCast)
return;
QualType QT = IC->getType();
(void)convertBlockPointerToFunctionPointer(QT);
std::string TypeString(QT.getAsString(Context->getPrintingPolicy()));
std::string Str = "(";
Str += TypeString;
Str += ")";
InsertText(IC->getSubExpr()->getLocStart(), &Str[0], Str.size());
return;
}
void RewriteModernObjC::RewriteBlockPointerFunctionArgs(FunctionDecl *FD) {
SourceLocation DeclLoc = FD->getLocation();
unsigned parenCount = 0;
// We have 1 or more arguments that have closure pointers.
const char *startBuf = SM->getCharacterData(DeclLoc);
const char *startArgList = strchr(startBuf, '(');
assert((*startArgList == '(') && "Rewriter fuzzy parser confused");
parenCount++;
// advance the location to startArgList.
DeclLoc = DeclLoc.getLocWithOffset(startArgList-startBuf);
assert((DeclLoc.isValid()) && "Invalid DeclLoc");
const char *argPtr = startArgList;
while (*argPtr++ && parenCount) {
switch (*argPtr) {
case '^':
// Replace the '^' with '*'.
DeclLoc = DeclLoc.getLocWithOffset(argPtr-startArgList);
ReplaceText(DeclLoc, 1, "*");
break;
case '(':
parenCount++;
break;
case ')':
parenCount--;
break;
}
}
return;
}
bool RewriteModernObjC::PointerTypeTakesAnyBlockArguments(QualType QT) {
const FunctionProtoType *FTP;
const PointerType *PT = QT->getAs<PointerType>();
if (PT) {
FTP = PT->getPointeeType()->getAs<FunctionProtoType>();
} else {
const BlockPointerType *BPT = QT->getAs<BlockPointerType>();
    assert(BPT &&
           "PointerTypeTakesAnyBlockArguments(): not a block pointer type");
FTP = BPT->getPointeeType()->getAs<FunctionProtoType>();
}
if (FTP) {
for (const auto &I : FTP->param_types())
if (isTopLevelBlockPointerType(I))
return true;
}
return false;
}
bool RewriteModernObjC::PointerTypeTakesAnyObjCQualifiedType(QualType QT) {
const FunctionProtoType *FTP;
const PointerType *PT = QT->getAs<PointerType>();
if (PT) {
FTP = PT->getPointeeType()->getAs<FunctionProtoType>();
} else {
const BlockPointerType *BPT = QT->getAs<BlockPointerType>();
    assert(BPT &&
           "PointerTypeTakesAnyObjCQualifiedType(): not a block pointer type");
FTP = BPT->getPointeeType()->getAs<FunctionProtoType>();
}
if (FTP) {
for (const auto &I : FTP->param_types()) {
if (I->isObjCQualifiedIdType())
return true;
if (I->isObjCObjectPointerType() &&
I->getPointeeType()->isObjCQualifiedInterfaceType())
return true;
}
}
return false;
}
void RewriteModernObjC::GetExtentOfArgList(const char *Name, const char *&LParen,
const char *&RParen) {
const char *argPtr = strchr(Name, '(');
assert((*argPtr == '(') && "Rewriter fuzzy parser confused");
LParen = argPtr; // output the start.
argPtr++; // skip past the left paren.
unsigned parenCount = 1;
while (*argPtr && parenCount) {
switch (*argPtr) {
case '(': parenCount++; break;
case ')': parenCount--; break;
default: break;
}
if (parenCount) argPtr++;
}
assert((*argPtr == ')') && "Rewriter fuzzy parser confused");
RParen = argPtr; // output the end
}
void RewriteModernObjC::RewriteBlockPointerDecl(NamedDecl *ND) {
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
RewriteBlockPointerFunctionArgs(FD);
return;
}
// Handle Variables and Typedefs.
SourceLocation DeclLoc = ND->getLocation();
QualType DeclT;
if (VarDecl *VD = dyn_cast<VarDecl>(ND))
DeclT = VD->getType();
else if (TypedefNameDecl *TDD = dyn_cast<TypedefNameDecl>(ND))
DeclT = TDD->getUnderlyingType();
else if (FieldDecl *FD = dyn_cast<FieldDecl>(ND))
DeclT = FD->getType();
else
llvm_unreachable("RewriteBlockPointerDecl(): Decl type not yet handled");
const char *startBuf = SM->getCharacterData(DeclLoc);
const char *endBuf = startBuf;
// scan backward (from the decl location) for the end of the previous decl.
while (*startBuf != '^' && *startBuf != ';' && startBuf != MainFileStart)
startBuf--;
SourceLocation Start = DeclLoc.getLocWithOffset(startBuf-endBuf);
std::string buf;
unsigned OrigLength=0;
// *startBuf != '^' if we are dealing with a pointer to function that
// may take block argument types (which will be handled below).
if (*startBuf == '^') {
// Replace the '^' with '*', computing a negative offset.
buf = '*';
startBuf++;
OrigLength++;
}
while (*startBuf != ')') {
buf += *startBuf;
startBuf++;
OrigLength++;
}
buf += ')';
OrigLength++;
if (PointerTypeTakesAnyBlockArguments(DeclT) ||
PointerTypeTakesAnyObjCQualifiedType(DeclT)) {
// Replace the '^' with '*' for arguments.
// Replace id<P> with id/*<>*/
DeclLoc = ND->getLocation();
startBuf = SM->getCharacterData(DeclLoc);
const char *argListBegin, *argListEnd;
GetExtentOfArgList(startBuf, argListBegin, argListEnd);
while (argListBegin < argListEnd) {
if (*argListBegin == '^')
buf += '*';
else if (*argListBegin == '<') {
buf += "/*";
buf += *argListBegin++;
OrigLength++;
while (*argListBegin != '>') {
buf += *argListBegin++;
OrigLength++;
}
buf += *argListBegin;
buf += "*/";
}
else
buf += *argListBegin;
argListBegin++;
OrigLength++;
}
buf += ')';
OrigLength++;
}
ReplaceText(Start, OrigLength, buf);
}
/// SynthesizeByrefCopyDestroyHelper - This routine synthesizes:
/// void __Block_byref_id_object_copy(struct Block_byref_id_object *dst,
/// struct Block_byref_id_object *src) {
/// _Block_object_assign (&_dest->object, _src->object,
/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT
/// [|BLOCK_FIELD_IS_WEAK]) // object
/// _Block_object_assign(&_dest->object, _src->object,
/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK
/// [|BLOCK_FIELD_IS_WEAK]) // block
/// }
/// And:
/// void __Block_byref_id_object_dispose(struct Block_byref_id_object *_src) {
/// _Block_object_dispose(_src->object,
/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT
/// [|BLOCK_FIELD_IS_WEAK]) // object
/// _Block_object_dispose(_src->object,
/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK
/// [|BLOCK_FIELD_IS_WEAK]) // block
/// }
std::string RewriteModernObjC::SynthesizeByrefCopyDestroyHelper(VarDecl *VD,
int flag) {
std::string S;
if (CopyDestroyCache.count(flag))
return S;
CopyDestroyCache.insert(flag);
S = "static void __Block_byref_id_object_copy_";
S += utostr(flag);
S += "(void *dst, void *src) {\n";
// offset into the object pointer is computed as:
// void * + void* + int + int + void* + void *
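  // For example, on a typical 64-bit target (8-byte pointers, 4-byte ints)
  // this works out to 4*8 + 2*4 = 40 bytes.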
unsigned IntSize =
static_cast<unsigned>(Context->getTypeSize(Context->IntTy));
unsigned VoidPtrSize =
static_cast<unsigned>(Context->getTypeSize(Context->VoidPtrTy));
unsigned offset = (VoidPtrSize*4 + IntSize + IntSize)/Context->getCharWidth();
S += " _Block_object_assign((char*)dst + ";
S += utostr(offset);
S += ", *(void * *) ((char*)src + ";
S += utostr(offset);
S += "), ";
S += utostr(flag);
S += ");\n}\n";
S += "static void __Block_byref_id_object_dispose_";
S += utostr(flag);
S += "(void *src) {\n";
S += " _Block_object_dispose(*(void * *) ((char*)src + ";
S += utostr(offset);
S += "), ";
S += utostr(flag);
S += ");\n}\n";
return S;
}
/// RewriteByRefVar - For each declaration of a __block variable 'typex ND',
/// this routine transforms the declaration into:
/// struct __Block_byref_ND {
/// void *__isa; // NULL for everything except __weak pointers
/// struct __Block_byref_ND *__forwarding;
/// int32_t __flags;
/// int32_t __size;
/// void *__Block_byref_id_object_copy; // If variable is __block ObjC object
/// void *__Block_byref_id_object_dispose; // If variable is __block ObjC object
/// typex ND;
/// };
///
/// It then replaces declaration of ND variable with:
/// struct __Block_byref_ND ND = {__isa=0B, __forwarding=&ND, __flags=some_flag,
/// __size=sizeof(struct __Block_byref_ND),
/// ND=initializer-if-any};
///
///
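/// For illustration (schematic; the actual struct name is produced by
/// RewriteByRefString), a declaration such as '__block int x = 10;' becomes
/// roughly:
///   struct __Block_byref_x x = {(void*)0, &x, 0,
///                               sizeof(struct __Block_byref_x), 10};
///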
void RewriteModernObjC::RewriteByRefVar(VarDecl *ND, bool firstDecl,
bool lastDecl) {
int flag = 0;
int isa = 0;
SourceLocation DeclLoc = ND->getTypeSpecStartLoc();
  if (DeclLoc.isInvalid())
    // The type location can be missing when the type itself is missing (a
    // warning has already been issued). Fall back to the variable's own
    // location, which works for this case.
    DeclLoc = ND->getLocation();
const char *startBuf = SM->getCharacterData(DeclLoc);
SourceLocation X = ND->getLocEnd();
X = SM->getExpansionLoc(X);
const char *endBuf = SM->getCharacterData(X);
std::string Name(ND->getNameAsString());
std::string ByrefType;
RewriteByRefString(ByrefType, Name, ND, true);
ByrefType += " {\n";
ByrefType += " void *__isa;\n";
RewriteByRefString(ByrefType, Name, ND);
ByrefType += " *__forwarding;\n";
ByrefType += " int __flags;\n";
ByrefType += " int __size;\n";
// Add void *__Block_byref_id_object_copy;
// void *__Block_byref_id_object_dispose; if needed.
QualType Ty = ND->getType();
bool HasCopyAndDispose = Context->BlockRequiresCopying(Ty, ND);
if (HasCopyAndDispose) {
ByrefType += " void (*__Block_byref_id_object_copy)(void*, void*);\n";
ByrefType += " void (*__Block_byref_id_object_dispose)(void*);\n";
}
QualType T = Ty;
(void)convertBlockPointerToFunctionPointer(T);
T.getAsStringInternal(Name, Context->getPrintingPolicy());
ByrefType += " " + Name + ";\n";
ByrefType += "};\n";
  // Insert this type at global scope. It is needed by the helper functions.
SourceLocation FunLocStart;
if (CurFunctionDef)
FunLocStart = getFunctionSourceLocation(*this, CurFunctionDef);
else {
assert(CurMethodDef && "RewriteByRefVar - CurMethodDef is null");
FunLocStart = CurMethodDef->getLocStart();
}
InsertText(FunLocStart, ByrefType);
if (Ty.isObjCGCWeak()) {
flag |= BLOCK_FIELD_IS_WEAK;
isa = 1;
}
if (HasCopyAndDispose) {
flag = BLOCK_BYREF_CALLER;
QualType Ty = ND->getType();
// FIXME. Handle __weak variable (BLOCK_FIELD_IS_WEAK) as well.
if (Ty->isBlockPointerType())
flag |= BLOCK_FIELD_IS_BLOCK;
else
flag |= BLOCK_FIELD_IS_OBJECT;
std::string HF = SynthesizeByrefCopyDestroyHelper(ND, flag);
if (!HF.empty())
Preamble += HF;
}
// struct __Block_byref_ND ND =
// {0, &ND, some_flag, __size=sizeof(struct __Block_byref_ND),
// initializer-if-any};
bool hasInit = (ND->getInit() != nullptr);
// FIXME. rewriter does not support __block c++ objects which
// require construction.
if (hasInit)
if (CXXConstructExpr *CExp = dyn_cast<CXXConstructExpr>(ND->getInit())) {
CXXConstructorDecl *CXXDecl = CExp->getConstructor();
if (CXXDecl && CXXDecl->isDefaultConstructor())
hasInit = false;
}
unsigned flags = 0;
if (HasCopyAndDispose)
flags |= BLOCK_HAS_COPY_DISPOSE;
Name = ND->getNameAsString();
ByrefType.clear();
RewriteByRefString(ByrefType, Name, ND);
std::string ForwardingCastType("(");
ForwardingCastType += ByrefType + " *)";
ByrefType += " " + Name + " = {(void*)";
ByrefType += utostr(isa);
ByrefType += "," + ForwardingCastType + "&" + Name + ", ";
ByrefType += utostr(flags);
ByrefType += ", ";
ByrefType += "sizeof(";
RewriteByRefString(ByrefType, Name, ND);
ByrefType += ")";
if (HasCopyAndDispose) {
ByrefType += ", __Block_byref_id_object_copy_";
ByrefType += utostr(flag);
ByrefType += ", __Block_byref_id_object_dispose_";
ByrefType += utostr(flag);
}
  if (!firstDecl) {
    // For a declaration containing multiple __block variables, for all but
    // the first variable find the location of the separating comma; that is
    // where the new text is to be inserted.
DeclLoc = ND->getLocation();
const char *startDeclBuf = SM->getCharacterData(DeclLoc);
const char *commaBuf = startDeclBuf;
while (*commaBuf != ',')
commaBuf--;
assert((*commaBuf == ',') && "RewriteByRefVar: can't find ','");
DeclLoc = DeclLoc.getLocWithOffset(commaBuf - startDeclBuf);
startBuf = commaBuf;
}
if (!hasInit) {
ByrefType += "};\n";
unsigned nameSize = Name.size();
    // For a block or function pointer declaration, the name is already
    // part of the declaration.
if (Ty->isBlockPointerType() || Ty->isFunctionPointerType())
nameSize = 1;
ReplaceText(DeclLoc, endBuf-startBuf+nameSize, ByrefType);
}
else {
ByrefType += ", ";
SourceLocation startLoc;
Expr *E = ND->getInit();
if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E))
startLoc = ECE->getLParenLoc();
else
startLoc = E->getLocStart();
startLoc = SM->getExpansionLoc(startLoc);
endBuf = SM->getCharacterData(startLoc);
ReplaceText(DeclLoc, endBuf-startBuf, ByrefType);
const char separator = lastDecl ? ';' : ',';
const char *startInitializerBuf = SM->getCharacterData(startLoc);
const char *separatorBuf = strchr(startInitializerBuf, separator);
assert((*separatorBuf == separator) &&
"RewriteByRefVar: can't find ';' or ','");
SourceLocation separatorLoc =
startLoc.getLocWithOffset(separatorBuf-startInitializerBuf);
InsertText(separatorLoc, lastDecl ? "}" : "};\n");
}
}
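/// CollectBlockDeclRefInfo - Collects the declarations referenced in the
/// block body, partitioning them into unique "by copy" and "by ref" capture
/// lists and recording any imported blocks and ObjC objects, which need
/// copy/dispose helpers.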
void RewriteModernObjC::CollectBlockDeclRefInfo(BlockExpr *Exp) {
// Add initializers for any closure decl refs.
GetBlockDeclRefExprs(Exp->getBody());
if (BlockDeclRefs.size()) {
// Unique all "by copy" declarations.
for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
if (!BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>()) {
if (!BlockByCopyDeclsPtrSet.count(BlockDeclRefs[i]->getDecl())) {
BlockByCopyDeclsPtrSet.insert(BlockDeclRefs[i]->getDecl());
BlockByCopyDecls.push_back(BlockDeclRefs[i]->getDecl());
}
}
// Unique all "by ref" declarations.
for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
if (BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>()) {
if (!BlockByRefDeclsPtrSet.count(BlockDeclRefs[i]->getDecl())) {
BlockByRefDeclsPtrSet.insert(BlockDeclRefs[i]->getDecl());
BlockByRefDecls.push_back(BlockDeclRefs[i]->getDecl());
}
}
// Find any imported blocks...they will need special attention.
for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
if (BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>() ||
BlockDeclRefs[i]->getType()->isObjCObjectPointerType() ||
BlockDeclRefs[i]->getType()->isBlockPointerType())
ImportedBlockDecls.insert(BlockDeclRefs[i]->getDecl());
}
}
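/// SynthBlockInitFunctionDecl - Synthesizes an extern, no-prototype function
/// declaration returning void*, used to reference a synthesized block symbol
/// by name.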
FunctionDecl *RewriteModernObjC::SynthBlockInitFunctionDecl(StringRef name) {
IdentifierInfo *ID = &Context->Idents.get(name);
QualType FType = Context->getFunctionNoProtoType(Context->VoidPtrTy);
return FunctionDecl::Create(*Context, TUDecl, SourceLocation(),
SourceLocation(), ID, FType, nullptr, SC_Extern,
false, false);
}
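/// SynthBlockInitExpr - Synthesizes the expression that replaces a block
/// literal: a simulated call to the synthesized __..._block_impl_N
/// constructor, passing the invoke function, the block descriptor, the
/// captured variables, and (when copy/dispose helpers are required) the
/// block flags. The result is address-of'd, cast to the block pointer type,
/// and parenthesized.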
Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
const SmallVectorImpl<DeclRefExpr *> &InnerBlockDeclRefs) {
const BlockDecl *block = Exp->getBlockDecl();
Blocks.push_back(Exp);
CollectBlockDeclRefInfo(Exp);
// Add inner imported variables now used in current block.
int countOfInnerDecls = 0;
if (!InnerBlockDeclRefs.empty()) {
for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++) {
DeclRefExpr *Exp = InnerBlockDeclRefs[i];
ValueDecl *VD = Exp->getDecl();
if (!VD->hasAttr<BlocksAttr>() && !BlockByCopyDeclsPtrSet.count(VD)) {
        // Save the copied-in variables used in nested blocks; they are
        // needed at the end for some of the API generation. See the
        // SynthesizeBlockLiterals routine.
InnerDeclRefs.push_back(Exp); countOfInnerDecls++;
BlockDeclRefs.push_back(Exp);
BlockByCopyDeclsPtrSet.insert(VD);
BlockByCopyDecls.push_back(VD);
}
if (VD->hasAttr<BlocksAttr>() && !BlockByRefDeclsPtrSet.count(VD)) {
InnerDeclRefs.push_back(Exp); countOfInnerDecls++;
BlockDeclRefs.push_back(Exp);
BlockByRefDeclsPtrSet.insert(VD);
BlockByRefDecls.push_back(VD);
}
}
// Find any imported blocks...they will need special attention.
for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++)
if (InnerBlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>() ||
InnerBlockDeclRefs[i]->getType()->isObjCObjectPointerType() ||
InnerBlockDeclRefs[i]->getType()->isBlockPointerType())
ImportedBlockDecls.insert(InnerBlockDeclRefs[i]->getDecl());
}
InnerDeclRefsCount.push_back(countOfInnerDecls);
std::string FuncName;
if (CurFunctionDef)
FuncName = CurFunctionDef->getNameAsString();
else if (CurMethodDef)
BuildUniqueMethodName(FuncName, CurMethodDef);
else if (GlobalVarDecl)
    FuncName = GlobalVarDecl->getNameAsString();
bool GlobalBlockExpr =
block->getDeclContext()->getRedeclContext()->isFileContext();
if (GlobalBlockExpr && !GlobalVarDecl) {
Diags.Report(block->getLocation(), GlobalBlockRewriteFailedDiag);
GlobalBlockExpr = false;
}
std::string BlockNumber = utostr(Blocks.size()-1);
std::string Func = "__" + FuncName + "_block_func_" + BlockNumber;
// Get a pointer to the function type so we can cast appropriately.
QualType BFT = convertFunctionTypeOfBlocks(Exp->getFunctionType());
QualType FType = Context->getPointerType(BFT);
FunctionDecl *FD;
Expr *NewRep;
// Simulate a constructor call...
std::string Tag;
if (GlobalBlockExpr)
Tag = "__global_";
else
Tag = "__";
Tag += FuncName + "_block_impl_" + BlockNumber;
FD = SynthBlockInitFunctionDecl(Tag);
DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, FType, VK_RValue,
SourceLocation());
SmallVector<Expr*, 4> InitExprs;
// Initialize the block function.
FD = SynthBlockInitFunctionDecl(Func);
DeclRefExpr *Arg = new (Context) DeclRefExpr(FD, false, FD->getType(),
VK_LValue, SourceLocation());
CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy,
CK_BitCast, Arg);
InitExprs.push_back(castExpr);
// Initialize the block descriptor.
std::string DescData = "__" + FuncName + "_block_desc_" + BlockNumber + "_DATA";
VarDecl *NewVD = VarDecl::Create(*Context, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get(DescData.c_str()),
Context->VoidPtrTy, nullptr,
SC_Static);
UnaryOperator *DescRefExpr =
new (Context) UnaryOperator(new (Context) DeclRefExpr(NewVD, false,
Context->VoidPtrTy,
VK_LValue,
SourceLocation()),
UO_AddrOf,
Context->getPointerType(Context->VoidPtrTy),
VK_RValue, OK_Ordinary,
SourceLocation());
InitExprs.push_back(DescRefExpr);
// Add initializers for any closure decl refs.
if (BlockDeclRefs.size()) {
Expr *Exp;
// Output all "by copy" declarations.
for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByCopyDecls.begin(),
E = BlockByCopyDecls.end(); I != E; ++I) {
if (isObjCType((*I)->getType())) {
// FIXME: Conform to ABI ([[obj retain] autorelease]).
FD = SynthBlockInitFunctionDecl((*I)->getName());
Exp = new (Context) DeclRefExpr(FD, false, FD->getType(),
VK_LValue, SourceLocation());
if (HasLocalVariableExternalStorage(*I)) {
QualType QT = (*I)->getType();
QT = Context->getPointerType(QT);
Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
OK_Ordinary, SourceLocation());
}
} else if (isTopLevelBlockPointerType((*I)->getType())) {
FD = SynthBlockInitFunctionDecl((*I)->getName());
Arg = new (Context) DeclRefExpr(FD, false, FD->getType(),
VK_LValue, SourceLocation());
Exp = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy,
CK_BitCast, Arg);
} else {
FD = SynthBlockInitFunctionDecl((*I)->getName());
Exp = new (Context) DeclRefExpr(FD, false, FD->getType(),
VK_LValue, SourceLocation());
if (HasLocalVariableExternalStorage(*I)) {
QualType QT = (*I)->getType();
QT = Context->getPointerType(QT);
Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
OK_Ordinary, SourceLocation());
}
}
InitExprs.push_back(Exp);
}
// Output all "by ref" declarations.
for (SmallVectorImpl<ValueDecl *>::iterator I = BlockByRefDecls.begin(),
E = BlockByRefDecls.end(); I != E; ++I) {
ValueDecl *ND = (*I);
std::string Name(ND->getNameAsString());
std::string RecName;
RewriteByRefString(RecName, Name, ND, true);
IdentifierInfo *II = &Context->Idents.get(RecName.c_str()
+ sizeof("struct"));
RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
SourceLocation(), SourceLocation(),
II);
assert(RD && "SynthBlockInitExpr(): Can't find RecordDecl");
QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
FD = SynthBlockInitFunctionDecl((*I)->getName());
Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue,
SourceLocation());
bool isNestedCapturedVar = false;
if (block)
for (const auto &CI : block->captures()) {
const VarDecl *variable = CI.getVariable();
if (variable == ND && CI.isNested()) {
assert (CI.isByRef() &&
"SynthBlockInitExpr - captured block variable is not byref");
isNestedCapturedVar = true;
break;
}
}
      // A captured nested byref variable already has its address passed;
      // do not take its address again.
if (!isNestedCapturedVar)
Exp = new (Context) UnaryOperator(Exp, UO_AddrOf,
Context->getPointerType(Exp->getType()),
VK_RValue, OK_Ordinary, SourceLocation());
Exp = NoTypeInfoCStyleCastExpr(Context, castT, CK_BitCast, Exp);
InitExprs.push_back(Exp);
}
}
if (ImportedBlockDecls.size()) {
    // Generate BLOCK_HAS_COPY_DISPOSE (has helper funcs) | BLOCK_HAS_DESCRIPTOR.
int flag = (BLOCK_HAS_COPY_DISPOSE | BLOCK_HAS_DESCRIPTOR);
unsigned IntSize =
static_cast<unsigned>(Context->getTypeSize(Context->IntTy));
Expr *FlagExp = IntegerLiteral::Create(*Context, llvm::APInt(IntSize, flag),
Context->IntTy, SourceLocation());
InitExprs.push_back(FlagExp);
}
NewRep = new (Context) CallExpr(*Context, DRE, InitExprs,
FType, VK_LValue, SourceLocation());
if (GlobalBlockExpr) {
assert (!GlobalConstructionExp &&
"SynthBlockInitExpr - GlobalConstructionExp must be null");
GlobalConstructionExp = NewRep;
NewRep = DRE;
}
NewRep = new (Context) UnaryOperator(NewRep, UO_AddrOf,
Context->getPointerType(NewRep->getType()),
VK_RValue, OK_Ordinary, SourceLocation());
NewRep = NoTypeInfoCStyleCastExpr(Context, FType, CK_BitCast,
NewRep);
  // Put parens around the call.
NewRep = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
NewRep);
BlockDeclRefs.clear();
BlockByRefDecls.clear();
BlockByRefDeclsPtrSet.clear();
BlockByCopyDecls.clear();
BlockByCopyDeclsPtrSet.clear();
ImportedBlockDecls.clear();
return NewRep;
}
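/// IsDeclStmtInForeachHeader - Returns true if DS is the element declaration
/// in the header of the innermost enclosing ObjCForCollectionStmt.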
bool RewriteModernObjC::IsDeclStmtInForeachHeader(DeclStmt *DS) {
if (const ObjCForCollectionStmt * CS =
dyn_cast<ObjCForCollectionStmt>(Stmts.back()))
return CS->getElement() == DS;
return false;
}
//===----------------------------------------------------------------------===//
// Function Body / Expression rewriting
//===----------------------------------------------------------------------===//
Stmt *RewriteModernObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
isa<DoStmt>(S) || isa<ForStmt>(S))
Stmts.push_back(S);
else if (isa<ObjCForCollectionStmt>(S)) {
Stmts.push_back(S);
ObjCBcLabelNo.push_back(++BcLabelCount);
}
// Pseudo-object operations and ivar references need special
// treatment because we're going to recursively rewrite them.
if (PseudoObjectExpr *PseudoOp = dyn_cast<PseudoObjectExpr>(S)) {
if (isa<BinaryOperator>(PseudoOp->getSyntacticForm())) {
return RewritePropertyOrImplicitSetter(PseudoOp);
} else {
return RewritePropertyOrImplicitGetter(PseudoOp);
}
} else if (ObjCIvarRefExpr *IvarRefExpr = dyn_cast<ObjCIvarRefExpr>(S)) {
return RewriteObjCIvarRefExpr(IvarRefExpr);
}
else if (isa<OpaqueValueExpr>(S))
S = cast<OpaqueValueExpr>(S)->getSourceExpr();
SourceRange OrigStmtRange = S->getSourceRange();
// Perform a bottom up rewrite of all children.
for (Stmt *&childStmt : S->children())
if (childStmt) {
Stmt *newStmt = RewriteFunctionBodyOrGlobalInitializer(childStmt);
if (newStmt) {
childStmt = newStmt;
}
}
if (BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
SmallVector<DeclRefExpr *, 8> InnerBlockDeclRefs;
llvm::SmallPtrSet<const DeclContext *, 8> InnerContexts;
InnerContexts.insert(BE->getBlockDecl());
ImportedLocalExternalDecls.clear();
GetInnerBlockDeclRefExprs(BE->getBody(),
InnerBlockDeclRefs, InnerContexts);
// Rewrite the block body in place.
Stmt *SaveCurrentBody = CurrentBody;
CurrentBody = BE->getBody();
PropParentMap = nullptr;
    // A block literal on the RHS of a property-dot-syntax assignment
    // must be replaced by its synthesized AST so that getRewrittenText
    // works as expected. In this case, what actually ends up on the RHS
    // is blockTranscribed, the helper function for the block literal;
    // as in: self.c = ^() {[ace ARR];};
bool saveDisableReplaceStmt = DisableReplaceStmt;
DisableReplaceStmt = false;
RewriteFunctionBodyOrGlobalInitializer(BE->getBody());
DisableReplaceStmt = saveDisableReplaceStmt;
CurrentBody = SaveCurrentBody;
PropParentMap = nullptr;
ImportedLocalExternalDecls.clear();
// Now we snarf the rewritten text and stash it away for later use.
std::string Str = Rewrite.getRewrittenText(BE->getSourceRange());
RewrittenBlockExprs[BE] = Str;
Stmt *blockTranscribed = SynthBlockInitExpr(BE, InnerBlockDeclRefs);
//blockTranscribed->dump();
ReplaceStmt(S, blockTranscribed);
return blockTranscribed;
}
// Handle specific things.
if (ObjCEncodeExpr *AtEncode = dyn_cast<ObjCEncodeExpr>(S))
return RewriteAtEncode(AtEncode);
if (ObjCSelectorExpr *AtSelector = dyn_cast<ObjCSelectorExpr>(S))
return RewriteAtSelector(AtSelector);
if (ObjCStringLiteral *AtString = dyn_cast<ObjCStringLiteral>(S))
return RewriteObjCStringLiteral(AtString);
if (ObjCBoolLiteralExpr *BoolLitExpr = dyn_cast<ObjCBoolLiteralExpr>(S))
return RewriteObjCBoolLiteralExpr(BoolLitExpr);
if (ObjCBoxedExpr *BoxedExpr = dyn_cast<ObjCBoxedExpr>(S))
return RewriteObjCBoxedExpr(BoxedExpr);
if (ObjCArrayLiteral *ArrayLitExpr = dyn_cast<ObjCArrayLiteral>(S))
return RewriteObjCArrayLiteralExpr(ArrayLitExpr);
if (ObjCDictionaryLiteral *DictionaryLitExpr =
dyn_cast<ObjCDictionaryLiteral>(S))
return RewriteObjCDictionaryLiteralExpr(DictionaryLitExpr);
if (ObjCMessageExpr *MessExpr = dyn_cast<ObjCMessageExpr>(S)) {
#if 0
// Before we rewrite it, put the original message expression in a comment.
SourceLocation startLoc = MessExpr->getLocStart();
SourceLocation endLoc = MessExpr->getLocEnd();
const char *startBuf = SM->getCharacterData(startLoc);
const char *endBuf = SM->getCharacterData(endLoc);
std::string messString;
messString += "// ";
messString.append(startBuf, endBuf-startBuf+1);
messString += "\n";
// FIXME: Missing definition of
// InsertText(clang::SourceLocation, char const*, unsigned int).
// InsertText(startLoc, messString.c_str(), messString.size());
// Tried this, but it didn't work either...
// ReplaceText(startLoc, 0, messString.c_str(), messString.size());
#endif
return RewriteMessageExpr(MessExpr);
}
if (ObjCAutoreleasePoolStmt *StmtAutoRelease =
dyn_cast<ObjCAutoreleasePoolStmt>(S)) {
return RewriteObjCAutoreleasePoolStmt(StmtAutoRelease);
}
if (ObjCAtTryStmt *StmtTry = dyn_cast<ObjCAtTryStmt>(S))
return RewriteObjCTryStmt(StmtTry);
if (ObjCAtSynchronizedStmt *StmtTry = dyn_cast<ObjCAtSynchronizedStmt>(S))
return RewriteObjCSynchronizedStmt(StmtTry);
if (ObjCAtThrowStmt *StmtThrow = dyn_cast<ObjCAtThrowStmt>(S))
return RewriteObjCThrowStmt(StmtThrow);
if (ObjCProtocolExpr *ProtocolExp = dyn_cast<ObjCProtocolExpr>(S))
return RewriteObjCProtocolExpr(ProtocolExp);
if (ObjCForCollectionStmt *StmtForCollection =
dyn_cast<ObjCForCollectionStmt>(S))
return RewriteObjCForCollectionStmt(StmtForCollection,
OrigStmtRange.getEnd());
if (BreakStmt *StmtBreakStmt =
dyn_cast<BreakStmt>(S))
return RewriteBreakStmt(StmtBreakStmt);
if (ContinueStmt *StmtContinueStmt =
dyn_cast<ContinueStmt>(S))
return RewriteContinueStmt(StmtContinueStmt);
// Need to check for protocol refs (id <P>, Foo <P> *) in variable decls
// and cast exprs.
if (DeclStmt *DS = dyn_cast<DeclStmt>(S)) {
// FIXME: What we're doing here is modifying the type-specifier that
// precedes the first Decl. In the future the DeclGroup should have
// a separate type-specifier that we can rewrite.
// NOTE: We need to avoid rewriting the DeclStmt if it is within
// the context of an ObjCForCollectionStmt. For example:
// NSArray *someArray;
// for (id <FooProtocol> index in someArray) ;
// This is because RewriteObjCForCollectionStmt() does textual rewriting
// and it depends on the original text locations/positions.
if (Stmts.empty() || !IsDeclStmtInForeachHeader(DS))
RewriteObjCQualifiedInterfaceTypes(*DS->decl_begin());
// Blocks rewrite rules.
for (DeclStmt::decl_iterator DI = DS->decl_begin(), DE = DS->decl_end();
DI != DE; ++DI) {
Decl *SD = *DI;
if (ValueDecl *ND = dyn_cast<ValueDecl>(SD)) {
if (isTopLevelBlockPointerType(ND->getType()))
RewriteBlockPointerDecl(ND);
else if (ND->getType()->isFunctionPointerType())
CheckFunctionPointerDecl(ND->getType(), ND);
if (VarDecl *VD = dyn_cast<VarDecl>(SD)) {
if (VD->hasAttr<BlocksAttr>()) {
static unsigned uniqueByrefDeclCount = 0;
assert(!BlockByRefDeclNo.count(ND) &&
"RewriteFunctionBodyOrGlobalInitializer: Duplicate byref decl");
BlockByRefDeclNo[ND] = uniqueByrefDeclCount++;
RewriteByRefVar(VD, (DI == DS->decl_begin()), ((DI+1) == DE));
}
else
RewriteTypeOfDecl(VD);
}
}
if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(SD)) {
if (isTopLevelBlockPointerType(TD->getUnderlyingType()))
RewriteBlockPointerDecl(TD);
else if (TD->getUnderlyingType()->isFunctionPointerType())
CheckFunctionPointerDecl(TD->getUnderlyingType(), TD);
}
}
}
if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(S))
RewriteObjCQualifiedInterfaceTypes(CE);
if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
isa<DoStmt>(S) || isa<ForStmt>(S)) {
assert(!Stmts.empty() && "Statement stack is empty");
assert ((isa<SwitchStmt>(Stmts.back()) || isa<WhileStmt>(Stmts.back()) ||
isa<DoStmt>(Stmts.back()) || isa<ForStmt>(Stmts.back()))
&& "Statement stack mismatch");
Stmts.pop_back();
}
// Handle blocks rewriting.
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S)) {
ValueDecl *VD = DRE->getDecl();
if (VD->hasAttr<BlocksAttr>())
return RewriteBlockDeclRefExpr(DRE);
if (HasLocalVariableExternalStorage(VD))
return RewriteLocalVariableExternalStorage(DRE);
}
if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
if (CE->getCallee()->getType()->isBlockPointerType()) {
Stmt *BlockCall = SynthesizeBlockCall(CE, CE->getCallee());
ReplaceStmt(S, BlockCall);
return BlockCall;
}
}
if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(S)) {
RewriteCastExpr(CE);
}
if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(S)) {
RewriteImplicitCastObjCExpr(ICE);
}
#if 0
if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(S)) {
CastExpr *Replacement = new (Context) CastExpr(ICE->getType(),
ICE->getSubExpr(),
SourceLocation());
// Get the new text.
std::string SStr;
llvm::raw_string_ostream Buf(SStr);
Replacement->printPretty(Buf);
const std::string &Str = Buf.str();
printf("CAST = %s\n", &Str[0]);
InsertText(ICE->getSubExpr()->getLocStart(), &Str[0], Str.size());
delete S;
return Replacement;
}
#endif
// Return this stmt unmodified.
return S;
}
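/// RewriteRecordBody - Rewrites the fields of a record definition: block
/// pointer fields and ObjC-qualified id/interface fields.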
void RewriteModernObjC::RewriteRecordBody(RecordDecl *RD) {
for (auto *FD : RD->fields()) {
if (isTopLevelBlockPointerType(FD->getType()))
RewriteBlockPointerDecl(FD);
if (FD->getType()->isObjCQualifiedIdType() ||
FD->getType()->isObjCQualifiedInterfaceType())
RewriteObjCQualifiedInterfaceTypes(FD);
}
}
/// HandleDeclInMainFile - This is called for each top-level decl defined in the
/// main file of the input.
void RewriteModernObjC::HandleDeclInMainFile(Decl *D) {
switch (D->getKind()) {
case Decl::Function: {
FunctionDecl *FD = cast<FunctionDecl>(D);
if (FD->isOverloadedOperator())
return;
// Since function prototypes don't have ParmDecl's, we check the function
// prototype. This enables us to rewrite function declarations and
// definitions using the same code.
RewriteBlocksInFunctionProtoType(FD->getType(), FD);
if (!FD->isThisDeclarationADefinition())
break;
// FIXME: If this should support Obj-C++, support CXXTryStmt
if (CompoundStmt *Body = dyn_cast_or_null<CompoundStmt>(FD->getBody())) {
CurFunctionDef = FD;
CurrentBody = Body;
Body =
cast_or_null<CompoundStmt>(RewriteFunctionBodyOrGlobalInitializer(Body));
FD->setBody(Body);
CurrentBody = nullptr;
if (PropParentMap) {
delete PropParentMap;
PropParentMap = nullptr;
}
// This synthesizes and inserts the block "impl" struct, invoke function,
// and any copy/dispose helper functions.
InsertBlockLiteralsWithinFunction(FD);
RewriteLineDirective(D);
CurFunctionDef = nullptr;
}
break;
}
case Decl::ObjCMethod: {
ObjCMethodDecl *MD = cast<ObjCMethodDecl>(D);
if (CompoundStmt *Body = MD->getCompoundBody()) {
CurMethodDef = MD;
CurrentBody = Body;
Body =
cast_or_null<CompoundStmt>(RewriteFunctionBodyOrGlobalInitializer(Body));
MD->setBody(Body);
CurrentBody = nullptr;
if (PropParentMap) {
delete PropParentMap;
PropParentMap = nullptr;
}
InsertBlockLiteralsWithinMethod(MD);
RewriteLineDirective(D);
CurMethodDef = nullptr;
}
break;
}
case Decl::ObjCImplementation: {
ObjCImplementationDecl *CI = cast<ObjCImplementationDecl>(D);
ClassImplementation.push_back(CI);
break;
}
case Decl::ObjCCategoryImpl: {
ObjCCategoryImplDecl *CI = cast<ObjCCategoryImplDecl>(D);
CategoryImplementation.push_back(CI);
break;
}
case Decl::Var: {
VarDecl *VD = cast<VarDecl>(D);
RewriteObjCQualifiedInterfaceTypes(VD);
if (isTopLevelBlockPointerType(VD->getType()))
RewriteBlockPointerDecl(VD);
else if (VD->getType()->isFunctionPointerType()) {
CheckFunctionPointerDecl(VD->getType(), VD);
if (VD->getInit()) {
if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(VD->getInit())) {
RewriteCastExpr(CE);
}
}
} else if (VD->getType()->isRecordType()) {
RecordDecl *RD = VD->getType()->getAs<RecordType>()->getDecl();
if (RD->isCompleteDefinition())
RewriteRecordBody(RD);
}
if (VD->getInit()) {
GlobalVarDecl = VD;
CurrentBody = VD->getInit();
RewriteFunctionBodyOrGlobalInitializer(VD->getInit());
CurrentBody = nullptr;
if (PropParentMap) {
delete PropParentMap;
PropParentMap = nullptr;
}
SynthesizeBlockLiterals(VD->getTypeSpecStartLoc(), VD->getName());
GlobalVarDecl = nullptr;
// This is needed for blocks.
if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(VD->getInit())) {
RewriteCastExpr(CE);
}
}
break;
}
case Decl::TypeAlias:
case Decl::Typedef: {
if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
if (isTopLevelBlockPointerType(TD->getUnderlyingType()))
RewriteBlockPointerDecl(TD);
else if (TD->getUnderlyingType()->isFunctionPointerType())
CheckFunctionPointerDecl(TD->getUnderlyingType(), TD);
else
RewriteObjCQualifiedInterfaceTypes(TD);
}
break;
}
case Decl::CXXRecord:
case Decl::Record: {
RecordDecl *RD = cast<RecordDecl>(D);
if (RD->isCompleteDefinition())
RewriteRecordBody(RD);
break;
}
default:
break;
}
// Nothing yet.
}
/// Write_ProtocolExprReferencedMetadata - This routine writes out the
/// protocol reference symbols in the form of:
/// struct _protocol_t *PROTOCOL_REF = &PROTOCOL_METADATA.
static void Write_ProtocolExprReferencedMetadata(ASTContext *Context,
ObjCProtocolDecl *PDecl,
std::string &Result) {
// Also output .objc_protorefs$B section and its meta-data.
if (Context->getLangOpts().MicrosoftExt)
Result += "static ";
Result += "struct _protocol_t *";
Result += "_OBJC_PROTOCOL_REFERENCE_$_";
Result += PDecl->getNameAsString();
Result += " = &";
Result += "_OBJC_PROTOCOL_"; Result += PDecl->getNameAsString();
Result += ";\n";
}
void RewriteModernObjC::HandleTranslationUnit(ASTContext &C) {
if (Diags.hasErrorOccurred())
return;
RewriteInclude();
for (unsigned i = 0, e = FunctionDefinitionsSeen.size(); i < e; i++) {
    // Translation of function bodies was postponed until all classes and
    // their extensions and implementations have been seen, because we
    // cannot build the grouping structs for bitfields until then.
FunctionDecl *FDecl = FunctionDefinitionsSeen[i];
HandleTopLevelSingleDecl(FDecl);
}
// Here's a great place to add any extra declarations that may be needed.
// Write out meta data for each @protocol(<expr>).
for (ObjCProtocolDecl *ProtDecl : ProtocolExprDecls) {
RewriteObjCProtocolMetaData(ProtDecl, Preamble);
Write_ProtocolExprReferencedMetadata(Context, ProtDecl, Preamble);
}
InsertText(SM->getLocForStartOfFile(MainFileID), Preamble, false);
if (ClassImplementation.size() || CategoryImplementation.size())
RewriteImplementations();
for (unsigned i = 0, e = ObjCInterfacesSeen.size(); i < e; i++) {
ObjCInterfaceDecl *CDecl = ObjCInterfacesSeen[i];
// Write struct declaration for the class matching its ivar declarations.
    // Note that for the modern ABI, this is postponed until the end of the
    // TU because class extensions and the implementation might declare
    // their own private ivars.
RewriteInterfaceDecl(CDecl);
}
// Get the buffer corresponding to MainFileID. If we haven't changed it, then
// we are done.
if (const RewriteBuffer *RewriteBuf =
Rewrite.getRewriteBufferFor(MainFileID)) {
//printf("Changed:\n");
*OutFile << std::string(RewriteBuf->begin(), RewriteBuf->end());
} else {
llvm::errs() << "No changes\n";
}
if (ClassImplementation.size() || CategoryImplementation.size() ||
ProtocolExprDecls.size()) {
    // Rewrite Objective-C metadata.
std::string ResultStr;
RewriteMetaDataIntoBuffer(ResultStr);
// Emit metadata.
*OutFile << ResultStr;
}
  // Emit ImageInfo.
{
std::string ResultStr;
WriteImageInfo(ResultStr);
*OutFile << ResultStr;
}
OutFile->flush();
}
void RewriteModernObjC::Initialize(ASTContext &context) {
  InitializeCommon(context);
  // For headers, start the preamble with '#pragma once'. Do this before
  // appending anything else, since the assignment below would otherwise
  // discard text already added to the preamble.
  if (IsHeader)
    Preamble = "#pragma once\n";
  Preamble += "#ifndef __OBJC2__\n";
  Preamble += "#define __OBJC2__\n";
  Preamble += "#endif\n";
  // Declaring objc_selector outside the parameter list removes a silly
  // scope-related warning...
  Preamble += "struct objc_selector; struct objc_class;\n";
Preamble += "struct __rw_objc_super { \n\tstruct objc_object *object; ";
Preamble += "\n\tstruct objc_object *superClass; ";
// Add a constructor for creating temporary objects.
Preamble += "\n\t__rw_objc_super(struct objc_object *o, struct objc_object *s) ";
Preamble += ": object(o), superClass(s) {} ";
Preamble += "\n};\n";
if (LangOpts.MicrosoftExt) {
// Define all sections using syntax that makes sense.
// These are currently generated.
Preamble += "\n#pragma section(\".objc_classlist$B\", long, read, write)\n";
Preamble += "#pragma section(\".objc_catlist$B\", long, read, write)\n";
Preamble += "#pragma section(\".objc_imageinfo$B\", long, read, write)\n";
Preamble += "#pragma section(\".objc_nlclslist$B\", long, read, write)\n";
Preamble += "#pragma section(\".objc_nlcatlist$B\", long, read, write)\n";
// These are generated but not necessary for functionality.
Preamble += "#pragma section(\".cat_cls_meth$B\", long, read, write)\n";
Preamble += "#pragma section(\".inst_meth$B\", long, read, write)\n";
Preamble += "#pragma section(\".cls_meth$B\", long, read, write)\n";
Preamble += "#pragma section(\".objc_ivar$B\", long, read, write)\n";
    // These need to be generated for performance. Currently they are not;
    // API calls are used instead.
Preamble += "#pragma section(\".objc_selrefs$B\", long, read, write)\n";
Preamble += "#pragma section(\".objc_classrefs$B\", long, read, write)\n";
Preamble += "#pragma section(\".objc_superrefs$B\", long, read, write)\n";
}
Preamble += "#ifndef _REWRITER_typedef_Protocol\n";
Preamble += "typedef struct objc_object Protocol;\n";
Preamble += "#define _REWRITER_typedef_Protocol\n";
Preamble += "#endif\n";
if (LangOpts.MicrosoftExt) {
Preamble += "#define __OBJC_RW_DLLIMPORT extern \"C\" __declspec(dllimport)\n";
Preamble += "#define __OBJC_RW_STATICIMPORT extern \"C\"\n";
}
else
Preamble += "#define __OBJC_RW_DLLIMPORT extern\n";
Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSend(void);\n";
Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSendSuper(void);\n";
Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSend_stret(void);\n";
Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSendSuper_stret(void);\n";
Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSend_fpret(void);\n";
Preamble += "__OBJC_RW_DLLIMPORT struct objc_class *objc_getClass";
Preamble += "(const char *);\n";
Preamble += "__OBJC_RW_DLLIMPORT struct objc_class *class_getSuperclass";
Preamble += "(struct objc_class *);\n";
Preamble += "__OBJC_RW_DLLIMPORT struct objc_class *objc_getMetaClass";
Preamble += "(const char *);\n";
Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_throw( struct objc_object *);\n";
// @synchronized hooks.
Preamble += "__OBJC_RW_DLLIMPORT int objc_sync_enter( struct objc_object *);\n";
Preamble += "__OBJC_RW_DLLIMPORT int objc_sync_exit( struct objc_object *);\n";
Preamble += "__OBJC_RW_DLLIMPORT Protocol *objc_getProtocol(const char *);\n";
Preamble += "#ifdef _WIN64\n";
Preamble += "typedef unsigned long long _WIN_NSUInteger;\n";
Preamble += "#else\n";
Preamble += "typedef unsigned int _WIN_NSUInteger;\n";
Preamble += "#endif\n";
Preamble += "#ifndef __FASTENUMERATIONSTATE\n";
Preamble += "struct __objcFastEnumerationState {\n\t";
Preamble += "unsigned long state;\n\t";
Preamble += "void **itemsPtr;\n\t";
Preamble += "unsigned long *mutationsPtr;\n\t";
Preamble += "unsigned long extra[5];\n};\n";
Preamble += "__OBJC_RW_DLLIMPORT void objc_enumerationMutation(struct objc_object *);\n";
Preamble += "#define __FASTENUMERATIONSTATE\n";
Preamble += "#endif\n";
Preamble += "#ifndef __NSCONSTANTSTRINGIMPL\n";
Preamble += "struct __NSConstantStringImpl {\n";
Preamble += " int *isa;\n";
Preamble += " int flags;\n";
Preamble += " char *str;\n";
Preamble += "#if _WIN64\n";
Preamble += " long long length;\n";
Preamble += "#else\n";
Preamble += " long length;\n";
Preamble += "#endif\n";
Preamble += "};\n";
Preamble += "#ifdef CF_EXPORT_CONSTANT_STRING\n";
Preamble += "extern \"C\" __declspec(dllexport) int __CFConstantStringClassReference[];\n";
Preamble += "#else\n";
Preamble += "__OBJC_RW_DLLIMPORT int __CFConstantStringClassReference[];\n";
Preamble += "#endif\n";
Preamble += "#define __NSCONSTANTSTRINGIMPL\n";
Preamble += "#endif\n";
// Blocks preamble.
Preamble += "#ifndef BLOCK_IMPL\n";
Preamble += "#define BLOCK_IMPL\n";
Preamble += "struct __block_impl {\n";
Preamble += " void *isa;\n";
Preamble += " int Flags;\n";
Preamble += " int Reserved;\n";
Preamble += " void *FuncPtr;\n";
Preamble += "};\n";
Preamble += "// Runtime copy/destroy helper functions (from Block_private.h)\n";
Preamble += "#ifdef __OBJC_EXPORT_BLOCKS\n";
Preamble += "extern \"C\" __declspec(dllexport) "
"void _Block_object_assign(void *, const void *, const int);\n";
Preamble += "extern \"C\" __declspec(dllexport) void _Block_object_dispose(const void *, const int);\n";
Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteGlobalBlock[32];\n";
Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteStackBlock[32];\n";
Preamble += "#else\n";
Preamble += "__OBJC_RW_DLLIMPORT void _Block_object_assign(void *, const void *, const int);\n";
Preamble += "__OBJC_RW_DLLIMPORT void _Block_object_dispose(const void *, const int);\n";
Preamble += "__OBJC_RW_DLLIMPORT void *_NSConcreteGlobalBlock[32];\n";
Preamble += "__OBJC_RW_DLLIMPORT void *_NSConcreteStackBlock[32];\n";
Preamble += "#endif\n";
Preamble += "#endif\n";
if (LangOpts.MicrosoftExt) {
Preamble += "#undef __OBJC_RW_DLLIMPORT\n";
Preamble += "#undef __OBJC_RW_STATICIMPORT\n";
Preamble += "#ifndef KEEP_ATTRIBUTES\n"; // We use this for clang tests.
Preamble += "#define __attribute__(X)\n";
Preamble += "#endif\n";
Preamble += "#ifndef __weak\n";
Preamble += "#define __weak\n";
Preamble += "#endif\n";
Preamble += "#ifndef __block\n";
Preamble += "#define __block\n";
Preamble += "#endif\n";
}
else {
Preamble += "#define __block\n";
Preamble += "#define __weak\n";
}
  // Declarations required for modern Objective-C array and dictionary literals.
Preamble += "\n#include <stdarg.h>\n";
Preamble += "struct __NSContainer_literal {\n";
Preamble += " void * *arr;\n";
Preamble += " __NSContainer_literal (unsigned int count, ...) {\n";
Preamble += "\tva_list marker;\n";
Preamble += "\tva_start(marker, count);\n";
Preamble += "\tarr = new void *[count];\n";
Preamble += "\tfor (unsigned i = 0; i < count; i++)\n";
Preamble += "\t arr[i] = va_arg(marker, void *);\n";
Preamble += "\tva_end( marker );\n";
Preamble += " };\n";
Preamble += " ~__NSContainer_literal() {\n";
Preamble += "\tdelete[] arr;\n";
Preamble += " }\n";
Preamble += "};\n";
  // Declarations required for the implementation of the @autoreleasepool statement.
Preamble += "extern \"C\" __declspec(dllimport) void * objc_autoreleasePoolPush(void);\n";
Preamble += "extern \"C\" __declspec(dllimport) void objc_autoreleasePoolPop(void *);\n\n";
Preamble += "struct __AtAutoreleasePool {\n";
Preamble += " __AtAutoreleasePool() {atautoreleasepoolobj = objc_autoreleasePoolPush();}\n";
Preamble += " ~__AtAutoreleasePool() {objc_autoreleasePoolPop(atautoreleasepoolobj);}\n";
Preamble += " void * atautoreleasepoolobj;\n";
Preamble += "};\n";
  // NOTE: Windows uses LLP64 in 64-bit mode, so cast the pointer to
  // long long; this avoids a warning in both 64-bit and 32-bit
  // compilation models.
Preamble += "\n#define __OFFSETOFIVAR__(TYPE, MEMBER) ((long long) &((TYPE *)0)->MEMBER)\n";
}
/// RewriteIvarOffsetComputation - This routine synthesizes the computation
/// of an ivar offset.
void RewriteModernObjC::RewriteIvarOffsetComputation(ObjCIvarDecl *ivar,
std::string &Result) {
Result += "__OFFSETOFIVAR__(struct ";
Result += ivar->getContainingInterface()->getNameAsString();
if (LangOpts.MicrosoftExt)
Result += "_IMPL";
Result += ", ";
if (ivar->isBitField())
ObjCIvarBitfieldGroupDecl(ivar, Result);
else
Result += ivar->getNameAsString();
Result += ")";
}
/// WriteModernMetadataDeclarations - Writes out metadata declarations for modern ABI.
/// struct _prop_t {
/// const char *name;
/// char *attributes;
/// }
/// struct _prop_list_t {
/// uint32_t entsize; // sizeof(struct _prop_t)
/// uint32_t count_of_properties;
/// struct _prop_t prop_list[count_of_properties];
/// }
/// struct _protocol_t;
/// struct _protocol_list_t {
/// long protocol_count; // Note, this is 32/64 bit
/// struct _protocol_t * protocol_list[protocol_count];
/// }
/// struct _objc_method {
/// SEL _cmd;
/// const char *method_type;
///   void *_imp;
/// }
/// struct _method_list_t {
/// uint32_t entsize; // sizeof(struct _objc_method)
/// uint32_t method_count;
/// struct _objc_method method_list[method_count];
/// }
/// struct _protocol_t {
/// id isa; // NULL
/// const char *protocol_name;
/// const struct _protocol_list_t * protocol_list; // super protocols
/// const struct method_list_t *instance_methods;
/// const struct method_list_t *class_methods;
/// const struct method_list_t *optionalInstanceMethods;
/// const struct method_list_t *optionalClassMethods;
/// const struct _prop_list_t * properties;
/// const uint32_t size; // sizeof(struct _protocol_t)
/// const uint32_t flags; // = 0
/// const char ** extendedMethodTypes;
/// }
/// struct _ivar_t {
/// unsigned long int *offset; // pointer to ivar offset location
/// const char *name;
/// const char *type;
/// uint32_t alignment;
/// uint32_t size;
/// }
/// struct _ivar_list_t {
///   uint32_t entsize;  // sizeof(struct _ivar_t)
///   uint32_t count;
/// struct _ivar_t list[count];
/// }
/// struct _class_ro_t {
/// uint32_t flags;
/// uint32_t instanceStart;
/// uint32_t instanceSize;
/// uint32_t reserved; // only when building for 64bit targets
/// const uint8_t *ivarLayout;
/// const char *name;
/// const struct _method_list_t *baseMethods;
/// const struct _protocol_list_t *baseProtocols;
/// const struct _ivar_list_t *ivars;
/// const uint8_t *weakIvarLayout;
/// const struct _prop_list_t *properties;
/// }
/// struct _class_t {
/// struct _class_t *isa;
/// struct _class_t *superclass;
/// void *cache;
/// IMP *vtable;
/// struct _class_ro_t *ro;
/// }
/// struct _category_t {
/// const char *name;
/// struct _class_t *cls;
/// const struct _method_list_t *instance_methods;
/// const struct _method_list_t *class_methods;
/// const struct _protocol_list_t *protocols;
/// const struct _prop_list_t *properties;
/// }
/// MessageRefTy - LLVM for:
/// struct _message_ref_t {
/// IMP messenger;
/// SEL name;
/// };
/// SuperMessageRefTy - LLVM for:
/// struct _super_message_ref_t {
/// SUPER_IMP messenger;
/// SEL name;
/// };
static void WriteModernMetadataDeclarations(ASTContext *Context, std::string &Result) {
static bool meta_data_declared = false;
if (meta_data_declared)
return;
Result += "\nstruct _prop_t {\n";
Result += "\tconst char *name;\n";
Result += "\tconst char *attributes;\n";
Result += "};\n";
Result += "\nstruct _protocol_t;\n";
Result += "\nstruct _objc_method {\n";
Result += "\tstruct objc_selector * _cmd;\n";
Result += "\tconst char *method_type;\n";
Result += "\tvoid *_imp;\n";
Result += "};\n";
Result += "\nstruct _protocol_t {\n";
Result += "\tvoid * isa; // NULL\n";
Result += "\tconst char *protocol_name;\n";
Result += "\tconst struct _protocol_list_t * protocol_list; // super protocols\n";
Result += "\tconst struct method_list_t *instance_methods;\n";
Result += "\tconst struct method_list_t *class_methods;\n";
Result += "\tconst struct method_list_t *optionalInstanceMethods;\n";
Result += "\tconst struct method_list_t *optionalClassMethods;\n";
Result += "\tconst struct _prop_list_t * properties;\n";
Result += "\tconst unsigned int size; // sizeof(struct _protocol_t)\n";
Result += "\tconst unsigned int flags; // = 0\n";
Result += "\tconst char ** extendedMethodTypes;\n";
Result += "};\n";
Result += "\nstruct _ivar_t {\n";
Result += "\tunsigned long int *offset; // pointer to ivar offset location\n";
Result += "\tconst char *name;\n";
Result += "\tconst char *type;\n";
Result += "\tunsigned int alignment;\n";
Result += "\tunsigned int size;\n";
Result += "};\n";
Result += "\nstruct _class_ro_t {\n";
Result += "\tunsigned int flags;\n";
Result += "\tunsigned int instanceStart;\n";
Result += "\tunsigned int instanceSize;\n";
const llvm::Triple &Triple(Context->getTargetInfo().getTriple());
if (Triple.getArch() == llvm::Triple::x86_64)
Result += "\tunsigned int reserved;\n";
Result += "\tconst unsigned char *ivarLayout;\n";
Result += "\tconst char *name;\n";
Result += "\tconst struct _method_list_t *baseMethods;\n";
Result += "\tconst struct _objc_protocol_list *baseProtocols;\n";
Result += "\tconst struct _ivar_list_t *ivars;\n";
Result += "\tconst unsigned char *weakIvarLayout;\n";
Result += "\tconst struct _prop_list_t *properties;\n";
Result += "};\n";
Result += "\nstruct _class_t {\n";
Result += "\tstruct _class_t *isa;\n";
Result += "\tstruct _class_t *superclass;\n";
Result += "\tvoid *cache;\n";
Result += "\tvoid *vtable;\n";
Result += "\tstruct _class_ro_t *ro;\n";
Result += "};\n";
Result += "\nstruct _category_t {\n";
Result += "\tconst char *name;\n";
Result += "\tstruct _class_t *cls;\n";
Result += "\tconst struct _method_list_t *instance_methods;\n";
Result += "\tconst struct _method_list_t *class_methods;\n";
Result += "\tconst struct _protocol_list_t *protocols;\n";
Result += "\tconst struct _prop_list_t *properties;\n";
Result += "};\n";
Result += "extern \"C\" __declspec(dllimport) struct objc_cache _objc_empty_cache;\n";
Result += "#pragma warning(disable:4273)\n";
meta_data_declared = true;
}
static void Write_protocol_list_t_TypeDecl(std::string &Result,
long super_protocol_count) {
Result += "struct /*_protocol_list_t*/"; Result += " {\n";
Result += "\tlong protocol_count; // Note, this is 32/64 bit\n";
Result += "\tstruct _protocol_t *super_protocols[";
Result += utostr(super_protocol_count); Result += "];\n";
Result += "}";
}
static void Write_method_list_t_TypeDecl(std::string &Result,
unsigned int method_count) {
Result += "struct /*_method_list_t*/"; Result += " {\n";
Result += "\tunsigned int entsize; // sizeof(struct _objc_method)\n";
Result += "\tunsigned int method_count;\n";
Result += "\tstruct _objc_method method_list[";
Result += utostr(method_count); Result += "];\n";
Result += "}";
}
static void Write__prop_list_t_TypeDecl(std::string &Result,
unsigned int property_count) {
Result += "struct /*_prop_list_t*/"; Result += " {\n";
Result += "\tunsigned int entsize; // sizeof(struct _prop_t)\n";
Result += "\tunsigned int count_of_properties;\n";
Result += "\tstruct _prop_t prop_list[";
Result += utostr(property_count); Result += "];\n";
Result += "}";
}
static void Write__ivar_list_t_TypeDecl(std::string &Result,
unsigned int ivar_count) {
Result += "struct /*_ivar_list_t*/"; Result += " {\n";
Result += "\tunsigned int entsize; // sizeof(struct _prop_t)\n";
Result += "\tunsigned int count;\n";
Result += "\tstruct _ivar_t ivar_list[";
Result += utostr(ivar_count); Result += "];\n";
Result += "}";
}
static void Write_protocol_list_initializer(ASTContext *Context, std::string &Result,
ArrayRef<ObjCProtocolDecl *> SuperProtocols,
StringRef VarName,
StringRef ProtocolName) {
if (SuperProtocols.size() > 0) {
Result += "\nstatic ";
Write_protocol_list_t_TypeDecl(Result, SuperProtocols.size());
Result += " "; Result += VarName;
Result += ProtocolName;
Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n";
Result += "\t"; Result += utostr(SuperProtocols.size()); Result += ",\n";
for (unsigned i = 0, e = SuperProtocols.size(); i < e; i++) {
ObjCProtocolDecl *SuperPD = SuperProtocols[i];
Result += "\t&"; Result += "_OBJC_PROTOCOL_";
Result += SuperPD->getNameAsString();
if (i == e-1)
Result += "\n};\n";
else
Result += ",\n";
}
}
}
static void Write_method_list_t_initializer(RewriteModernObjC &RewriteObj,
ASTContext *Context, std::string &Result,
ArrayRef<ObjCMethodDecl *> Methods,
StringRef VarName,
StringRef TopLevelDeclName,
bool MethodImpl) {
if (Methods.size() > 0) {
Result += "\nstatic ";
Write_method_list_t_TypeDecl(Result, Methods.size());
Result += " "; Result += VarName;
Result += TopLevelDeclName;
Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n";
Result += "\t"; Result += "sizeof(_objc_method)"; Result += ",\n";
Result += "\t"; Result += utostr(Methods.size()); Result += ",\n";
for (unsigned i = 0, e = Methods.size(); i < e; i++) {
ObjCMethodDecl *MD = Methods[i];
if (i == 0)
Result += "\t{{(struct objc_selector *)\"";
else
Result += "\t{(struct objc_selector *)\"";
Result += (MD)->getSelector().getAsString(); Result += "\"";
Result += ", ";
std::string MethodTypeString;
Context->getObjCEncodingForMethodDecl(MD, MethodTypeString);
Result += "\""; Result += MethodTypeString; Result += "\"";
Result += ", ";
if (!MethodImpl)
Result += "0";
else {
Result += "(void *)";
Result += RewriteObj.MethodInternalNames[MD];
}
if (i == e-1)
Result += "}}\n";
else
Result += "},\n";
}
Result += "};\n";
}
}
static void Write_prop_list_t_initializer(RewriteModernObjC &RewriteObj,
ASTContext *Context, std::string &Result,
ArrayRef<ObjCPropertyDecl *> Properties,
const Decl *Container,
StringRef VarName,
StringRef ProtocolName) {
if (Properties.size() > 0) {
Result += "\nstatic ";
Write__prop_list_t_TypeDecl(Result, Properties.size());
Result += " "; Result += VarName;
Result += ProtocolName;
Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n";
Result += "\t"; Result += "sizeof(_prop_t)"; Result += ",\n";
Result += "\t"; Result += utostr(Properties.size()); Result += ",\n";
for (unsigned i = 0, e = Properties.size(); i < e; i++) {
ObjCPropertyDecl *PropDecl = Properties[i];
if (i == 0)
Result += "\t{{\"";
else
Result += "\t{\"";
Result += PropDecl->getName(); Result += "\",";
std::string PropertyTypeString, QuotePropertyTypeString;
Context->getObjCEncodingForPropertyDecl(PropDecl, Container, PropertyTypeString);
RewriteObj.QuoteDoublequotes(PropertyTypeString, QuotePropertyTypeString);
Result += "\""; Result += QuotePropertyTypeString; Result += "\"";
if (i == e-1)
Result += "}}\n";
else
Result += "},\n";
}
Result += "};\n";
}
}
// Metadata flags
enum MetaDataFlags {
CLS = 0x0,
CLS_META = 0x1,
CLS_ROOT = 0x2,
OBJC2_CLS_HIDDEN = 0x10,
CLS_EXCEPTION = 0x20,
/// (Obsolete) ARC-specific: this class has a .release_ivars method
CLS_HAS_IVAR_RELEASER = 0x40,
/// class was compiled with -fobjc-arr
CLS_COMPILED_BY_ARC = 0x80 // (1<<7)
};
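/// Write__class_ro_t_initializer - Writes the static _class_ro_t metadata
/// initializer for a class or metaclass, referencing the previously emitted
/// method, protocol, ivar, and property lists (or 0 when a list is absent).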
static void Write__class_ro_t_initializer(ASTContext *Context, std::string &Result,
unsigned int flags,
const std::string &InstanceStart,
const std::string &InstanceSize,
ArrayRef<ObjCMethodDecl *>baseMethods,
ArrayRef<ObjCProtocolDecl *>baseProtocols,
ArrayRef<ObjCIvarDecl *>ivars,
ArrayRef<ObjCPropertyDecl *>Properties,
StringRef VarName,
StringRef ClassName) {
Result += "\nstatic struct _class_ro_t ";
Result += VarName; Result += ClassName;
Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n";
Result += "\t";
Result += llvm::utostr(flags); Result += ", ";
Result += InstanceStart; Result += ", ";
Result += InstanceSize; Result += ", \n";
Result += "\t";
const llvm::Triple &Triple(Context->getTargetInfo().getTriple());
if (Triple.getArch() == llvm::Triple::x86_64)
// uint32_t const reserved; // only when building for 64bit targets
Result += "(unsigned int)0, \n\t";
// const uint8_t * const ivarLayout;
Result += "0, \n\t";
Result += "\""; Result += ClassName; Result += "\",\n\t";
bool metaclass = ((flags & CLS_META) != 0);
if (baseMethods.size() > 0) {
Result += "(const struct _method_list_t *)&";
if (metaclass)
Result += "_OBJC_$_CLASS_METHODS_";
else
Result += "_OBJC_$_INSTANCE_METHODS_";
Result += ClassName;
Result += ",\n\t";
}
else
Result += "0, \n\t";
if (!metaclass && baseProtocols.size() > 0) {
Result += "(const struct _objc_protocol_list *)&";
Result += "_OBJC_CLASS_PROTOCOLS_$_"; Result += ClassName;
Result += ",\n\t";
}
else
Result += "0, \n\t";
if (!metaclass && ivars.size() > 0) {
Result += "(const struct _ivar_list_t *)&";
Result += "_OBJC_$_INSTANCE_VARIABLES_"; Result += ClassName;
Result += ",\n\t";
}
else
Result += "0, \n\t";
// weakIvarLayout
Result += "0, \n\t";
if (!metaclass && Properties.size() > 0) {
Result += "(const struct _prop_list_t *)&";
Result += "_OBJC_$_PROP_LIST_"; Result += ClassName;
Result += ",\n";
}
else
Result += "0, \n";
Result += "};\n";
}
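/// Write_class_t - Writes the _class_t metadata variable for CDecl (or for
/// its metaclass), emitting extern references to super/root classes as
/// needed, plus an OBJC_CLASS_SETUP_$_ function that initializes the isa,
/// superclass, and cache fields at startup.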
static void Write_class_t(ASTContext *Context, std::string &Result,
StringRef VarName,
const ObjCInterfaceDecl *CDecl, bool metaclass) {
bool rootClass = (!CDecl->getSuperClass());
const ObjCInterfaceDecl *RootClass = CDecl;
if (!rootClass) {
// Find the Root class
RootClass = CDecl->getSuperClass();
while (RootClass->getSuperClass()) {
RootClass = RootClass->getSuperClass();
}
}
if (metaclass && rootClass) {
// Need to handle a case of use of forward declaration.
Result += "\n";
Result += "extern \"C\" ";
if (CDecl->getImplementation())
Result += "__declspec(dllexport) ";
else
Result += "__declspec(dllimport) ";
Result += "struct _class_t OBJC_CLASS_$_";
Result += CDecl->getNameAsString();
Result += ";\n";
}
// Also, for possibility of 'super' metadata class not having been defined yet.
if (!rootClass) {
ObjCInterfaceDecl *SuperClass = CDecl->getSuperClass();
Result += "\n";
Result += "extern \"C\" ";
if (SuperClass->getImplementation())
Result += "__declspec(dllexport) ";
else
Result += "__declspec(dllimport) ";
Result += "struct _class_t ";
Result += VarName;
Result += SuperClass->getNameAsString();
Result += ";\n";
if (metaclass && RootClass != SuperClass) {
Result += "extern \"C\" ";
if (RootClass->getImplementation())
Result += "__declspec(dllexport) ";
else
Result += "__declspec(dllimport) ";
Result += "struct _class_t ";
Result += VarName;
Result += RootClass->getNameAsString();
Result += ";\n";
}
}
Result += "\nextern \"C\" __declspec(dllexport) struct _class_t ";
Result += VarName; Result += CDecl->getNameAsString();
Result += " __attribute__ ((used, section (\"__DATA,__objc_data\"))) = {\n";
Result += "\t";
if (metaclass) {
if (!rootClass) {
Result += "0, // &"; Result += VarName;
Result += RootClass->getNameAsString();
Result += ",\n\t";
Result += "0, // &"; Result += VarName;
Result += CDecl->getSuperClass()->getNameAsString();
Result += ",\n\t";
}
else {
Result += "0, // &"; Result += VarName;
Result += CDecl->getNameAsString();
Result += ",\n\t";
Result += "0, // &OBJC_CLASS_$_"; Result += CDecl->getNameAsString();
Result += ",\n\t";
}
}
else {
Result += "0, // &OBJC_METACLASS_$_";
Result += CDecl->getNameAsString();
Result += ",\n\t";
if (!rootClass) {
Result += "0, // &"; Result += VarName;
Result += CDecl->getSuperClass()->getNameAsString();
Result += ",\n\t";
}
else
Result += "0,\n\t";
}
Result += "0, // (void *)&_objc_empty_cache,\n\t";
Result += "0, // unused, was (void *)&_objc_empty_vtable,\n\t";
if (metaclass)
Result += "&_OBJC_METACLASS_RO_$_";
else
Result += "&_OBJC_CLASS_RO_$_";
Result += CDecl->getNameAsString();
Result += ",\n};\n";
// Add static function to initialize some of the meta-data fields.
// avoid doing it twice.
if (metaclass)
return;
const ObjCInterfaceDecl *SuperClass =
rootClass ? CDecl : CDecl->getSuperClass();
Result += "static void OBJC_CLASS_SETUP_$_";
Result += CDecl->getNameAsString();
Result += "(void ) {\n";
Result += "\tOBJC_METACLASS_$_"; Result += CDecl->getNameAsString();
Result += ".isa = "; Result += "&OBJC_METACLASS_$_";
Result += RootClass->getNameAsString(); Result += ";\n";
Result += "\tOBJC_METACLASS_$_"; Result += CDecl->getNameAsString();
Result += ".superclass = ";
if (rootClass)
Result += "&OBJC_CLASS_$_";
else
Result += "&OBJC_METACLASS_$_";
Result += SuperClass->getNameAsString(); Result += ";\n";
Result += "\tOBJC_METACLASS_$_"; Result += CDecl->getNameAsString();
Result += ".cache = "; Result += "&_objc_empty_cache"; Result += ";\n";
Result += "\tOBJC_CLASS_$_"; Result += CDecl->getNameAsString();
Result += ".isa = "; Result += "&OBJC_METACLASS_$_";
Result += CDecl->getNameAsString(); Result += ";\n";
if (!rootClass) {
Result += "\tOBJC_CLASS_$_"; Result += CDecl->getNameAsString();
Result += ".superclass = "; Result += "&OBJC_CLASS_$_";
Result += SuperClass->getNameAsString(); Result += ";\n";
}
Result += "\tOBJC_CLASS_$_"; Result += CDecl->getNameAsString();
Result += ".cache = "; Result += "&_objc_empty_cache"; Result += ";\n";
Result += "}\n";
}
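// Illustrative sketch (not part of the rewriter's machinery): for a
// hypothetical root class named Foo that is implemented in this TU,
// Write_class_t with metaclass=false emits roughly:
//
//   extern "C" __declspec(dllexport) struct _class_t OBJC_CLASS_$_Foo
//       __attribute__ ((used, section ("__DATA,__objc_data"))) = {
//     0, // &OBJC_METACLASS_$_Foo,
//     0,
//     0, // (void *)&_objc_empty_cache,
//     0, // unused, was (void *)&_objc_empty_vtable,
//     &_OBJC_CLASS_RO_$_Foo,
//   };
//
// followed by the OBJC_CLASS_SETUP_$_Foo function that patches the isa,
// superclass, and cache fields at startup. 'Foo' is an assumed name used
// purely for this sketch.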
static void Write_category_t(RewriteModernObjC &RewriteObj, ASTContext *Context,
std::string &Result,
ObjCCategoryDecl *CatDecl,
ObjCInterfaceDecl *ClassDecl,
ArrayRef<ObjCMethodDecl *> InstanceMethods,
ArrayRef<ObjCMethodDecl *> ClassMethods,
ArrayRef<ObjCProtocolDecl *> RefedProtocols,
ArrayRef<ObjCPropertyDecl *> ClassProperties) {
StringRef CatName = CatDecl->getName();
StringRef ClassName = ClassDecl->getName();
// must declare an extern class object in case this class is not implemented
// in this TU.
Result += "\n";
Result += "extern \"C\" ";
if (ClassDecl->getImplementation())
Result += "__declspec(dllexport) ";
else
Result += "__declspec(dllimport) ";
Result += "struct _class_t ";
Result += "OBJC_CLASS_$_"; Result += ClassName;
Result += ";\n";
Result += "\nstatic struct _category_t ";
Result += "_OBJC_$_CATEGORY_";
Result += ClassName; Result += "_$_"; Result += CatName;
Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = \n";
Result += "{\n";
Result += "\t\""; Result += ClassName; Result += "\",\n";
Result += "\t0, // &"; Result += "OBJC_CLASS_$_"; Result += ClassName;
Result += ",\n";
if (InstanceMethods.size() > 0) {
Result += "\t(const struct _method_list_t *)&";
Result += "_OBJC_$_CATEGORY_INSTANCE_METHODS_";
Result += ClassName; Result += "_$_"; Result += CatName;
Result += ",\n";
}
else
Result += "\t0,\n";
if (ClassMethods.size() > 0) {
Result += "\t(const struct _method_list_t *)&";
Result += "_OBJC_$_CATEGORY_CLASS_METHODS_";
Result += ClassName; Result += "_$_"; Result += CatName;
Result += ",\n";
}
else
Result += "\t0,\n";
if (RefedProtocols.size() > 0) {
Result += "\t(const struct _protocol_list_t *)&";
Result += "_OBJC_CATEGORY_PROTOCOLS_$_";
Result += ClassName; Result += "_$_"; Result += CatName;
Result += ",\n";
}
else
Result += "\t0,\n";
if (ClassProperties.size() > 0) {
Result += "\t(const struct _prop_list_t *)&"; Result += "_OBJC_$_PROP_LIST_";
Result += ClassName; Result += "_$_"; Result += CatName;
Result += ",\n";
}
else
Result += "\t0,\n";
Result += "};\n";
// Add static function to initialize the class pointer in the category structure.
Result += "static void OBJC_CATEGORY_SETUP_$_";
Result += ClassDecl->getNameAsString();
Result += "_$_";
Result += CatName;
Result += "(void ) {\n";
Result += "\t_OBJC_$_CATEGORY_";
Result += ClassDecl->getNameAsString();
Result += "_$_";
Result += CatName;
Result += ".cls = "; Result += "&OBJC_CLASS_$_"; Result += ClassName;
Result += ";\n}\n";
}
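// Illustrative sketch, assuming a hypothetical category Bar on class Foo with
// no methods, protocols, or properties; Write_category_t then emits roughly:
//
//   static struct _category_t _OBJC_$_CATEGORY_Foo_$_Bar
//       __attribute__ ((used, section ("__DATA,__objc_const"))) =
//   {
//     "Foo",
//     0, // &OBJC_CLASS_$_Foo,
//     0, 0, 0, 0,
//   };
//   static void OBJC_CATEGORY_SETUP_$_Foo_$_Bar(void ) {
//     _OBJC_$_CATEGORY_Foo_$_Bar.cls = &OBJC_CLASS_$_Foo;
//   }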
static void Write__extendedMethodTypes_initializer(RewriteModernObjC &RewriteObj,
ASTContext *Context, std::string &Result,
ArrayRef<ObjCMethodDecl *> Methods,
StringRef VarName,
StringRef ProtocolName) {
if (Methods.size() == 0)
return;
Result += "\nstatic const char *";
Result += VarName; Result += ProtocolName;
Result += " [] __attribute__ ((used, section (\"__DATA,__objc_const\"))) = \n";
Result += "{\n";
for (unsigned i = 0, e = Methods.size(); i < e; i++) {
ObjCMethodDecl *MD = Methods[i];
std::string MethodTypeString, QuoteMethodTypeString;
Context->getObjCEncodingForMethodDecl(MD, MethodTypeString, true);
RewriteObj.QuoteDoublequotes(MethodTypeString, QuoteMethodTypeString);
Result += "\t\""; Result += QuoteMethodTypeString; Result += "\"";
if (i == e-1)
Result += "\n};\n";
else {
Result += ",\n";
}
}
}
static void Write_IvarOffsetVar(RewriteModernObjC &RewriteObj,
ASTContext *Context,
std::string &Result,
ArrayRef<ObjCIvarDecl *> Ivars,
ObjCInterfaceDecl *CDecl) {
  // FIXME. Visibility of offset symbols may have to be set; for Darwin
  // this is what happens:
  /**
   if (Ivar->getAccessControl() == ObjCIvarDecl::Private ||
       Ivar->getAccessControl() == ObjCIvarDecl::Package ||
       Class->getVisibility() == HiddenVisibility)
     Visibility should be: HiddenVisibility;
   else
     Visibility should be: DefaultVisibility;
  */
Result += "\n";
for (unsigned i =0, e = Ivars.size(); i < e; i++) {
ObjCIvarDecl *IvarDecl = Ivars[i];
if (Context->getLangOpts().MicrosoftExt)
Result += "__declspec(allocate(\".objc_ivar$B\")) ";
if (!Context->getLangOpts().MicrosoftExt ||
IvarDecl->getAccessControl() == ObjCIvarDecl::Private ||
IvarDecl->getAccessControl() == ObjCIvarDecl::Package)
Result += "extern \"C\" unsigned long int ";
else
Result += "extern \"C\" __declspec(dllexport) unsigned long int ";
if (Ivars[i]->isBitField())
RewriteObj.ObjCIvarBitfieldGroupOffset(IvarDecl, Result);
else
WriteInternalIvarName(CDecl, IvarDecl, Result);
Result += " __attribute__ ((used, section (\"__DATA,__objc_ivar\")))";
Result += " = ";
RewriteObj.RewriteIvarOffsetComputation(IvarDecl, Result);
Result += ";\n";
if (Ivars[i]->isBitField()) {
// skip over rest of the ivar bitfields.
      SKIP_BITFIELDS(i, e, Ivars);
}
}
}
static void Write__ivar_list_t_initializer(RewriteModernObjC &RewriteObj,
ASTContext *Context, std::string &Result,
ArrayRef<ObjCIvarDecl *> OriginalIvars,
StringRef VarName,
ObjCInterfaceDecl *CDecl) {
if (OriginalIvars.size() > 0) {
Write_IvarOffsetVar(RewriteObj, Context, Result, OriginalIvars, CDecl);
SmallVector<ObjCIvarDecl *, 8> Ivars;
// strip off all but the first ivar bitfield from each group of ivars.
// Such ivars in the ivar list table will be replaced by their grouping struct
// 'ivar'.
for (unsigned i = 0, e = OriginalIvars.size(); i < e; i++) {
if (OriginalIvars[i]->isBitField()) {
Ivars.push_back(OriginalIvars[i]);
// skip over rest of the ivar bitfields.
        SKIP_BITFIELDS(i, e, OriginalIvars);
}
else
Ivars.push_back(OriginalIvars[i]);
}
Result += "\nstatic ";
Write__ivar_list_t_TypeDecl(Result, Ivars.size());
Result += " "; Result += VarName;
Result += CDecl->getNameAsString();
Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n";
Result += "\t"; Result += "sizeof(_ivar_t)"; Result += ",\n";
Result += "\t"; Result += utostr(Ivars.size()); Result += ",\n";
for (unsigned i =0, e = Ivars.size(); i < e; i++) {
ObjCIvarDecl *IvarDecl = Ivars[i];
if (i == 0)
Result += "\t{{";
else
Result += "\t {";
Result += "(unsigned long int *)&";
if (Ivars[i]->isBitField())
RewriteObj.ObjCIvarBitfieldGroupOffset(IvarDecl, Result);
else
WriteInternalIvarName(CDecl, IvarDecl, Result);
Result += ", ";
Result += "\"";
if (Ivars[i]->isBitField())
RewriteObj.ObjCIvarBitfieldGroupDecl(Ivars[i], Result);
else
Result += IvarDecl->getName();
Result += "\", ";
QualType IVQT = IvarDecl->getType();
if (IvarDecl->isBitField())
IVQT = RewriteObj.GetGroupRecordTypeForObjCIvarBitfield(IvarDecl);
std::string IvarTypeString, QuoteIvarTypeString;
Context->getObjCEncodingForType(IVQT, IvarTypeString,
IvarDecl);
RewriteObj.QuoteDoublequotes(IvarTypeString, QuoteIvarTypeString);
Result += "\""; Result += QuoteIvarTypeString; Result += "\", ";
      // FIXME. This alignment represents the host alignment and needs to be
      // changed to represent the target alignment.
unsigned Align = Context->getTypeAlign(IVQT)/8;
Align = llvm::Log2_32(Align);
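      // For example, a type with a 32-bit alignment yields Align = 4 bytes
      // above, which is then encoded as log2(4) = 2.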
Result += llvm::utostr(Align); Result += ", ";
CharUnits Size = Context->getTypeSizeInChars(IVQT);
Result += llvm::utostr(Size.getQuantity());
if (i == e-1)
Result += "}}\n";
else
Result += "},\n";
}
Result += "};\n";
}
}
/// RewriteObjCProtocolMetaData - Rewrite protocols meta-data.
void RewriteModernObjC::RewriteObjCProtocolMetaData(ObjCProtocolDecl *PDecl,
std::string &Result) {
// Do not synthesize the protocol more than once.
if (ObjCSynthesizedProtocols.count(PDecl->getCanonicalDecl()))
return;
WriteModernMetadataDeclarations(Context, Result);
if (ObjCProtocolDecl *Def = PDecl->getDefinition())
PDecl = Def;
// Must write out all protocol definitions in current qualifier list,
// and in their nested qualifiers before writing out current definition.
for (auto *I : PDecl->protocols())
RewriteObjCProtocolMetaData(I, Result);
// Construct method lists.
std::vector<ObjCMethodDecl *> InstanceMethods, ClassMethods;
std::vector<ObjCMethodDecl *> OptInstanceMethods, OptClassMethods;
for (auto *MD : PDecl->instance_methods()) {
if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
OptInstanceMethods.push_back(MD);
} else {
InstanceMethods.push_back(MD);
}
}
for (auto *MD : PDecl->class_methods()) {
if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
OptClassMethods.push_back(MD);
} else {
ClassMethods.push_back(MD);
}
}
std::vector<ObjCMethodDecl *> AllMethods;
for (unsigned i = 0, e = InstanceMethods.size(); i < e; i++)
AllMethods.push_back(InstanceMethods[i]);
for (unsigned i = 0, e = ClassMethods.size(); i < e; i++)
AllMethods.push_back(ClassMethods[i]);
for (unsigned i = 0, e = OptInstanceMethods.size(); i < e; i++)
AllMethods.push_back(OptInstanceMethods[i]);
for (unsigned i = 0, e = OptClassMethods.size(); i < e; i++)
AllMethods.push_back(OptClassMethods[i]);
Write__extendedMethodTypes_initializer(*this, Context, Result,
AllMethods,
"_OBJC_PROTOCOL_METHOD_TYPES_",
PDecl->getNameAsString());
// Protocol's super protocol list
SmallVector<ObjCProtocolDecl *, 8> SuperProtocols(PDecl->protocols());
Write_protocol_list_initializer(Context, Result, SuperProtocols,
"_OBJC_PROTOCOL_REFS_",
PDecl->getNameAsString());
Write_method_list_t_initializer(*this, Context, Result, InstanceMethods,
"_OBJC_PROTOCOL_INSTANCE_METHODS_",
PDecl->getNameAsString(), false);
Write_method_list_t_initializer(*this, Context, Result, ClassMethods,
"_OBJC_PROTOCOL_CLASS_METHODS_",
PDecl->getNameAsString(), false);
Write_method_list_t_initializer(*this, Context, Result, OptInstanceMethods,
"_OBJC_PROTOCOL_OPT_INSTANCE_METHODS_",
PDecl->getNameAsString(), false);
Write_method_list_t_initializer(*this, Context, Result, OptClassMethods,
"_OBJC_PROTOCOL_OPT_CLASS_METHODS_",
PDecl->getNameAsString(), false);
// Protocol's property metadata.
SmallVector<ObjCPropertyDecl *, 8> ProtocolProperties(PDecl->properties());
Write_prop_list_t_initializer(*this, Context, Result, ProtocolProperties,
/* Container */nullptr,
"_OBJC_PROTOCOL_PROPERTIES_",
PDecl->getNameAsString());
  // Write out root metadata for the current protocol: struct _protocol_t
Result += "\n";
if (LangOpts.MicrosoftExt)
Result += "static ";
Result += "struct _protocol_t _OBJC_PROTOCOL_";
Result += PDecl->getNameAsString();
Result += " __attribute__ ((used, section (\"__DATA,__datacoal_nt,coalesced\"))) = {\n";
Result += "\t0,\n"; // id is; is null
Result += "\t\""; Result += PDecl->getNameAsString(); Result += "\",\n";
if (SuperProtocols.size() > 0) {
Result += "\t(const struct _protocol_list_t *)&"; Result += "_OBJC_PROTOCOL_REFS_";
Result += PDecl->getNameAsString(); Result += ",\n";
}
else
Result += "\t0,\n";
if (InstanceMethods.size() > 0) {
Result += "\t(const struct method_list_t *)&_OBJC_PROTOCOL_INSTANCE_METHODS_";
Result += PDecl->getNameAsString(); Result += ",\n";
}
else
Result += "\t0,\n";
if (ClassMethods.size() > 0) {
Result += "\t(const struct method_list_t *)&_OBJC_PROTOCOL_CLASS_METHODS_";
Result += PDecl->getNameAsString(); Result += ",\n";
}
else
Result += "\t0,\n";
if (OptInstanceMethods.size() > 0) {
Result += "\t(const struct method_list_t *)&_OBJC_PROTOCOL_OPT_INSTANCE_METHODS_";
Result += PDecl->getNameAsString(); Result += ",\n";
}
else
Result += "\t0,\n";
if (OptClassMethods.size() > 0) {
Result += "\t(const struct method_list_t *)&_OBJC_PROTOCOL_OPT_CLASS_METHODS_";
Result += PDecl->getNameAsString(); Result += ",\n";
}
else
Result += "\t0,\n";
if (ProtocolProperties.size() > 0) {
Result += "\t(const struct _prop_list_t *)&_OBJC_PROTOCOL_PROPERTIES_";
Result += PDecl->getNameAsString(); Result += ",\n";
}
else
Result += "\t0,\n";
Result += "\t"; Result += "sizeof(_protocol_t)"; Result += ",\n";
Result += "\t0,\n";
if (AllMethods.size() > 0) {
Result += "\t(const char **)&"; Result += "_OBJC_PROTOCOL_METHOD_TYPES_";
Result += PDecl->getNameAsString();
Result += "\n};\n";
}
else
Result += "\t0\n};\n";
if (LangOpts.MicrosoftExt)
Result += "static ";
Result += "struct _protocol_t *";
Result += "_OBJC_LABEL_PROTOCOL_$_"; Result += PDecl->getNameAsString();
Result += " = &_OBJC_PROTOCOL_"; Result += PDecl->getNameAsString();
Result += ";\n";
// Mark this protocol as having been generated.
if (!ObjCSynthesizedProtocols.insert(PDecl->getCanonicalDecl()).second)
llvm_unreachable("protocol already synthesized");
}
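// Illustrative sketch: for a hypothetical empty protocol P (no methods,
// properties, or inherited protocols), the routine above reduces to roughly
// the following ('static' is only present under Microsoft extensions):
//
//   static struct _protocol_t _OBJC_PROTOCOL_P
//       __attribute__ ((used, section ("__DATA,__datacoal_nt,coalesced"))) = {
//     0, "P", 0, 0, 0, 0, 0, 0, sizeof(_protocol_t), 0, 0
//   };
//   static struct _protocol_t *_OBJC_LABEL_PROTOCOL_$_P = &_OBJC_PROTOCOL_P;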
void RewriteModernObjC::RewriteObjCProtocolListMetaData(
const ObjCList<ObjCProtocolDecl> &Protocols,
StringRef prefix, StringRef ClassName,
std::string &Result) {
if (Protocols.empty()) return;
for (unsigned i = 0; i != Protocols.size(); i++)
RewriteObjCProtocolMetaData(Protocols[i], Result);
  // Output the top level protocol meta-data for the class.
/* struct _objc_protocol_list {
struct _objc_protocol_list *next;
int protocol_count;
struct _objc_protocol *class_protocols[];
}
*/
Result += "\n";
if (LangOpts.MicrosoftExt)
Result += "__declspec(allocate(\".cat_cls_meth$B\")) ";
Result += "static struct {\n";
Result += "\tstruct _objc_protocol_list *next;\n";
Result += "\tint protocol_count;\n";
Result += "\tstruct _objc_protocol *class_protocols[";
Result += utostr(Protocols.size());
Result += "];\n} _OBJC_";
Result += prefix;
Result += "_PROTOCOLS_";
Result += ClassName;
Result += " __attribute__ ((used, section (\"__OBJC, __cat_cls_meth\")))= "
"{\n\t0, ";
Result += utostr(Protocols.size());
Result += "\n";
Result += "\t,{&_OBJC_PROTOCOL_";
Result += Protocols[0]->getNameAsString();
Result += " \n";
for (unsigned i = 1; i != Protocols.size(); i++) {
Result += "\t ,&_OBJC_PROTOCOL_";
Result += Protocols[i]->getNameAsString();
Result += "\n";
}
Result += "\t }\n};\n";
}
/// hasObjCExceptionAttribute - Return true if this class or any super
/// class has the __objc_exception__ attribute.
/// FIXME. Move this to ASTContext.cpp as it is also used for IRGen.
static bool hasObjCExceptionAttribute(ASTContext &Context,
const ObjCInterfaceDecl *OID) {
if (OID->hasAttr<ObjCExceptionAttr>())
return true;
if (const ObjCInterfaceDecl *Super = OID->getSuperClass())
return hasObjCExceptionAttribute(Context, Super);
return false;
}
void RewriteModernObjC::RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
std::string &Result) {
ObjCInterfaceDecl *CDecl = IDecl->getClassInterface();
// Explicitly declared @interface's are already synthesized.
if (CDecl->isImplicitInterfaceDecl())
    assert(false &&
           "Legacy implicit interface rewriting not supported in modern abi");
WriteModernMetadataDeclarations(Context, Result);
SmallVector<ObjCIvarDecl *, 8> IVars;
for (ObjCIvarDecl *IVD = CDecl->all_declared_ivar_begin();
IVD; IVD = IVD->getNextIvar()) {
// Ignore unnamed bit-fields.
if (!IVD->getDeclName())
continue;
IVars.push_back(IVD);
}
Write__ivar_list_t_initializer(*this, Context, Result, IVars,
"_OBJC_$_INSTANCE_VARIABLES_",
CDecl);
// Build _objc_method_list for class's instance methods if needed
SmallVector<ObjCMethodDecl *, 32> InstanceMethods(IDecl->instance_methods());
// If any of our property implementations have associated getters or
// setters, produce metadata for them as well.
for (const auto *Prop : IDecl->property_impls()) {
if (Prop->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
continue;
if (!Prop->getPropertyIvarDecl())
continue;
ObjCPropertyDecl *PD = Prop->getPropertyDecl();
if (!PD)
continue;
if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
if (mustSynthesizeSetterGetterMethod(IDecl, PD, true /*getter*/))
InstanceMethods.push_back(Getter);
if (PD->isReadOnly())
continue;
if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl())
if (mustSynthesizeSetterGetterMethod(IDecl, PD, false /*setter*/))
InstanceMethods.push_back(Setter);
}
Write_method_list_t_initializer(*this, Context, Result, InstanceMethods,
"_OBJC_$_INSTANCE_METHODS_",
IDecl->getNameAsString(), true);
SmallVector<ObjCMethodDecl *, 32> ClassMethods(IDecl->class_methods());
Write_method_list_t_initializer(*this, Context, Result, ClassMethods,
"_OBJC_$_CLASS_METHODS_",
IDecl->getNameAsString(), true);
// Protocols referenced in class declaration?
// Protocol's super protocol list
std::vector<ObjCProtocolDecl *> RefedProtocols;
const ObjCList<ObjCProtocolDecl> &Protocols = CDecl->getReferencedProtocols();
for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
E = Protocols.end();
I != E; ++I) {
RefedProtocols.push_back(*I);
// Must write out all protocol definitions in current qualifier list,
// and in their nested qualifiers before writing out current definition.
RewriteObjCProtocolMetaData(*I, Result);
}
Write_protocol_list_initializer(Context, Result,
RefedProtocols,
"_OBJC_CLASS_PROTOCOLS_$_",
IDecl->getNameAsString());
// Protocol's property metadata.
SmallVector<ObjCPropertyDecl *, 8> ClassProperties(CDecl->properties());
Write_prop_list_t_initializer(*this, Context, Result, ClassProperties,
/* Container */IDecl,
"_OBJC_$_PROP_LIST_",
CDecl->getNameAsString());
// Data for initializing _class_ro_t metaclass meta-data
uint32_t flags = CLS_META;
std::string InstanceSize;
std::string InstanceStart;
bool classIsHidden = CDecl->getVisibility() == HiddenVisibility;
if (classIsHidden)
flags |= OBJC2_CLS_HIDDEN;
if (!CDecl->getSuperClass())
// class is root
flags |= CLS_ROOT;
InstanceSize = "sizeof(struct _class_t)";
InstanceStart = InstanceSize;
Write__class_ro_t_initializer(Context, Result, flags,
InstanceStart, InstanceSize,
ClassMethods,
nullptr,
nullptr,
nullptr,
"_OBJC_METACLASS_RO_$_",
CDecl->getNameAsString());
// Data for initializing _class_ro_t meta-data
flags = CLS;
if (classIsHidden)
flags |= OBJC2_CLS_HIDDEN;
if (hasObjCExceptionAttribute(*Context, CDecl))
flags |= CLS_EXCEPTION;
if (!CDecl->getSuperClass())
// class is root
flags |= CLS_ROOT;
InstanceSize.clear();
InstanceStart.clear();
if (!ObjCSynthesizedStructs.count(CDecl)) {
InstanceSize = "0";
InstanceStart = "0";
}
else {
InstanceSize = "sizeof(struct ";
InstanceSize += CDecl->getNameAsString();
InstanceSize += "_IMPL)";
ObjCIvarDecl *IVD = CDecl->all_declared_ivar_begin();
if (IVD) {
RewriteIvarOffsetComputation(IVD, InstanceStart);
}
else
InstanceStart = InstanceSize;
}
Write__class_ro_t_initializer(Context, Result, flags,
InstanceStart, InstanceSize,
InstanceMethods,
RefedProtocols,
IVars,
ClassProperties,
"_OBJC_CLASS_RO_$_",
CDecl->getNameAsString());
Write_class_t(Context, Result,
"OBJC_METACLASS_$_",
CDecl, /*metaclass*/true);
Write_class_t(Context, Result,
"OBJC_CLASS_$_",
CDecl, /*metaclass*/false);
if (ImplementationIsNonLazy(IDecl))
DefinedNonLazyClasses.push_back(CDecl);
}
void RewriteModernObjC::RewriteClassSetupInitHook(std::string &Result) {
int ClsDefCount = ClassImplementation.size();
if (!ClsDefCount)
return;
Result += "#pragma section(\".objc_inithooks$B\", long, read, write)\n";
Result += "__declspec(allocate(\".objc_inithooks$B\")) ";
Result += "static void *OBJC_CLASS_SETUP[] = {\n";
for (int i = 0; i < ClsDefCount; i++) {
ObjCImplementationDecl *IDecl = ClassImplementation[i];
ObjCInterfaceDecl *CDecl = IDecl->getClassInterface();
Result += "\t(void *)&OBJC_CLASS_SETUP_$_";
Result += CDecl->getName(); Result += ",\n";
}
Result += "};\n";
}
void RewriteModernObjC::RewriteMetaDataIntoBuffer(std::string &Result) {
int ClsDefCount = ClassImplementation.size();
int CatDefCount = CategoryImplementation.size();
// For each implemented class, write out all its meta data.
for (int i = 0; i < ClsDefCount; i++)
RewriteObjCClassMetaData(ClassImplementation[i], Result);
RewriteClassSetupInitHook(Result);
// For each implemented category, write out all its meta data.
for (int i = 0; i < CatDefCount; i++)
RewriteObjCCategoryImplDecl(CategoryImplementation[i], Result);
RewriteCategorySetupInitHook(Result);
if (ClsDefCount > 0) {
if (LangOpts.MicrosoftExt)
Result += "__declspec(allocate(\".objc_classlist$B\")) ";
Result += "static struct _class_t *L_OBJC_LABEL_CLASS_$ [";
Result += llvm::utostr(ClsDefCount); Result += "]";
Result +=
" __attribute__((used, section (\"__DATA, __objc_classlist,"
"regular,no_dead_strip\")))= {\n";
for (int i = 0; i < ClsDefCount; i++) {
Result += "\t&OBJC_CLASS_$_";
Result += ClassImplementation[i]->getNameAsString();
Result += ",\n";
}
Result += "};\n";
if (!DefinedNonLazyClasses.empty()) {
if (LangOpts.MicrosoftExt)
Result += "__declspec(allocate(\".objc_nlclslist$B\")) \n";
Result += "static struct _class_t *_OBJC_LABEL_NONLAZY_CLASS_$[] = {\n\t";
for (unsigned i = 0, e = DefinedNonLazyClasses.size(); i < e; i++) {
Result += "\t&OBJC_CLASS_$_"; Result += DefinedNonLazyClasses[i]->getNameAsString();
Result += ",\n";
}
Result += "};\n";
}
}
if (CatDefCount > 0) {
if (LangOpts.MicrosoftExt)
Result += "__declspec(allocate(\".objc_catlist$B\")) ";
Result += "static struct _category_t *L_OBJC_LABEL_CATEGORY_$ [";
Result += llvm::utostr(CatDefCount); Result += "]";
Result +=
" __attribute__((used, section (\"__DATA, __objc_catlist,"
"regular,no_dead_strip\")))= {\n";
for (int i = 0; i < CatDefCount; i++) {
Result += "\t&_OBJC_$_CATEGORY_";
Result +=
CategoryImplementation[i]->getClassInterface()->getNameAsString();
Result += "_$_";
Result += CategoryImplementation[i]->getNameAsString();
Result += ",\n";
}
Result += "};\n";
}
if (!DefinedNonLazyCategories.empty()) {
if (LangOpts.MicrosoftExt)
Result += "__declspec(allocate(\".objc_nlcatlist$B\")) \n";
Result += "static struct _category_t *_OBJC_LABEL_NONLAZY_CATEGORY_$[] = {\n\t";
for (unsigned i = 0, e = DefinedNonLazyCategories.size(); i < e; i++) {
Result += "\t&_OBJC_$_CATEGORY_";
Result +=
DefinedNonLazyCategories[i]->getClassInterface()->getNameAsString();
Result += "_$_";
Result += DefinedNonLazyCategories[i]->getNameAsString();
Result += ",\n";
}
Result += "};\n";
}
}
void RewriteModernObjC::WriteImageInfo(std::string &Result) {
if (LangOpts.MicrosoftExt)
Result += "__declspec(allocate(\".objc_imageinfo$B\")) \n";
Result += "static struct IMAGE_INFO { unsigned version; unsigned flag; } ";
// version 0, ObjCABI is 2
Result += "_OBJC_IMAGE_INFO = { 0, 2 };\n";
}
/// RewriteObjCCategoryImplDecl - Rewrite metadata for each category
/// implementation.
void RewriteModernObjC::RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *IDecl,
std::string &Result) {
WriteModernMetadataDeclarations(Context, Result);
ObjCInterfaceDecl *ClassDecl = IDecl->getClassInterface();
// Find category declaration for this implementation.
ObjCCategoryDecl *CDecl
= ClassDecl->FindCategoryDeclaration(IDecl->getIdentifier());
std::string FullCategoryName = ClassDecl->getNameAsString();
FullCategoryName += "_$_";
FullCategoryName += CDecl->getNameAsString();
// Build _objc_method_list for class's instance methods if needed
SmallVector<ObjCMethodDecl *, 32> InstanceMethods(IDecl->instance_methods());
// If any of our property implementations have associated getters or
// setters, produce metadata for them as well.
for (const auto *Prop : IDecl->property_impls()) {
if (Prop->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
continue;
if (!Prop->getPropertyIvarDecl())
continue;
ObjCPropertyDecl *PD = Prop->getPropertyDecl();
if (!PD)
continue;
if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
InstanceMethods.push_back(Getter);
if (PD->isReadOnly())
continue;
if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl())
InstanceMethods.push_back(Setter);
}
Write_method_list_t_initializer(*this, Context, Result, InstanceMethods,
"_OBJC_$_CATEGORY_INSTANCE_METHODS_",
FullCategoryName, true);
SmallVector<ObjCMethodDecl *, 32> ClassMethods(IDecl->class_methods());
Write_method_list_t_initializer(*this, Context, Result, ClassMethods,
"_OBJC_$_CATEGORY_CLASS_METHODS_",
FullCategoryName, true);
// Protocols referenced in class declaration?
// Protocol's super protocol list
SmallVector<ObjCProtocolDecl *, 8> RefedProtocols(CDecl->protocols());
for (auto *I : CDecl->protocols())
// Must write out all protocol definitions in current qualifier list,
// and in their nested qualifiers before writing out current definition.
RewriteObjCProtocolMetaData(I, Result);
Write_protocol_list_initializer(Context, Result,
RefedProtocols,
"_OBJC_CATEGORY_PROTOCOLS_$_",
FullCategoryName);
// Protocol's property metadata.
SmallVector<ObjCPropertyDecl *, 8> ClassProperties(CDecl->properties());
Write_prop_list_t_initializer(*this, Context, Result, ClassProperties,
/* Container */IDecl,
"_OBJC_$_PROP_LIST_",
FullCategoryName);
Write_category_t(*this, Context, Result,
CDecl,
ClassDecl,
InstanceMethods,
ClassMethods,
RefedProtocols,
ClassProperties);
// Determine if this category is also "non-lazy".
if (ImplementationIsNonLazy(IDecl))
DefinedNonLazyCategories.push_back(CDecl);
}
void RewriteModernObjC::RewriteCategorySetupInitHook(std::string &Result) {
int CatDefCount = CategoryImplementation.size();
if (!CatDefCount)
return;
Result += "#pragma section(\".objc_inithooks$B\", long, read, write)\n";
Result += "__declspec(allocate(\".objc_inithooks$B\")) ";
Result += "static void *OBJC_CATEGORY_SETUP[] = {\n";
for (int i = 0; i < CatDefCount; i++) {
ObjCCategoryImplDecl *IDecl = CategoryImplementation[i];
    ObjCCategoryDecl *CatDecl = IDecl->getCategoryDecl();
ObjCInterfaceDecl *ClassDecl = IDecl->getClassInterface();
Result += "\t(void *)&OBJC_CATEGORY_SETUP_$_";
Result += ClassDecl->getName();
Result += "_$_";
Result += CatDecl->getName();
Result += ",\n";
}
Result += "};\n";
}
/// RewriteObjCMethodsMetaData - Rewrite methods metadata for instance or
/// class methods.
template<typename MethodIterator>
void RewriteModernObjC::RewriteObjCMethodsMetaData(MethodIterator MethodBegin,
MethodIterator MethodEnd,
bool IsInstanceMethod,
StringRef prefix,
StringRef ClassName,
std::string &Result) {
if (MethodBegin == MethodEnd) return;
if (!objc_impl_method) {
/* struct _objc_method {
SEL _cmd;
char *method_types;
void *_imp;
}
*/
Result += "\nstruct _objc_method {\n";
Result += "\tSEL _cmd;\n";
Result += "\tchar *method_types;\n";
Result += "\tvoid *_imp;\n";
Result += "};\n";
objc_impl_method = true;
}
// Build _objc_method_list for class's methods if needed
/* struct {
struct _objc_method_list *next_method;
int method_count;
struct _objc_method method_list[];
}
*/
unsigned NumMethods = std::distance(MethodBegin, MethodEnd);
Result += "\n";
if (LangOpts.MicrosoftExt) {
if (IsInstanceMethod)
Result += "__declspec(allocate(\".inst_meth$B\")) ";
else
Result += "__declspec(allocate(\".cls_meth$B\")) ";
}
Result += "static struct {\n";
Result += "\tstruct _objc_method_list *next_method;\n";
Result += "\tint method_count;\n";
Result += "\tstruct _objc_method method_list[";
Result += utostr(NumMethods);
Result += "];\n} _OBJC_";
Result += prefix;
Result += IsInstanceMethod ? "INSTANCE" : "CLASS";
Result += "_METHODS_";
Result += ClassName;
Result += " __attribute__ ((used, section (\"__OBJC, __";
Result += IsInstanceMethod ? "inst" : "cls";
Result += "_meth\")))= ";
Result += "{\n\t0, " + utostr(NumMethods) + "\n";
Result += "\t,{{(SEL)\"";
Result += (*MethodBegin)->getSelector().getAsString().c_str();
std::string MethodTypeString;
Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString);
Result += "\", \"";
Result += MethodTypeString;
Result += "\", (void *)";
Result += MethodInternalNames[*MethodBegin];
Result += "}\n";
for (++MethodBegin; MethodBegin != MethodEnd; ++MethodBegin) {
Result += "\t ,{(SEL)\"";
Result += (*MethodBegin)->getSelector().getAsString().c_str();
std::string MethodTypeString;
Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString);
Result += "\", \"";
Result += MethodTypeString;
Result += "\", (void *)";
Result += MethodInternalNames[*MethodBegin];
Result += "}\n";
}
Result += "\t }\n};\n";
}
Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
SourceRange OldRange = IV->getSourceRange();
Expr *BaseExpr = IV->getBase();
// Rewrite the base, but without actually doing replaces.
{
DisableReplaceStmtScope S(*this);
BaseExpr = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(BaseExpr));
IV->setBase(BaseExpr);
}
ObjCIvarDecl *D = IV->getDecl();
Expr *Replacement = IV;
if (BaseExpr->getType()->isObjCObjectPointerType()) {
const ObjCInterfaceType *iFaceDecl =
dyn_cast<ObjCInterfaceType>(BaseExpr->getType()->getPointeeType());
assert(iFaceDecl && "RewriteObjCIvarRefExpr - iFaceDecl is null");
// lookup which class implements the instance variable.
ObjCInterfaceDecl *clsDeclared = nullptr;
iFaceDecl->getDecl()->lookupInstanceVariable(D->getIdentifier(),
clsDeclared);
assert(clsDeclared && "RewriteObjCIvarRefExpr(): Can't find class");
// Build name of symbol holding ivar offset.
std::string IvarOffsetName;
if (D->isBitField())
ObjCIvarBitfieldGroupOffset(D, IvarOffsetName);
else
WriteInternalIvarName(clsDeclared, D, IvarOffsetName);
ReferencedIvars[clsDeclared].insert(D);
// cast offset to "char *".
CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(Context->CharTy),
CK_BitCast,
BaseExpr);
VarDecl *NewVD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
SourceLocation(), &Context->Idents.get(IvarOffsetName),
Context->UnsignedLongTy, nullptr,
SC_Extern);
DeclRefExpr *DRE = new (Context) DeclRefExpr(NewVD, false,
Context->UnsignedLongTy, VK_LValue,
SourceLocation());
BinaryOperator *addExpr =
new (Context) BinaryOperator(castExpr, DRE, BO_Add,
Context->getPointerType(Context->CharTy),
VK_RValue, OK_Ordinary, SourceLocation(), false);
// Don't forget the parens to enforce the proper binding.
ParenExpr *PE = new (Context) ParenExpr(SourceLocation(),
SourceLocation(),
addExpr);
QualType IvarT = D->getType();
if (D->isBitField())
IvarT = GetGroupRecordTypeForObjCIvarBitfield(D);
if (!isa<TypedefType>(IvarT) && IvarT->isRecordType()) {
RecordDecl *RD = IvarT->getAs<RecordType>()->getDecl();
RD = RD->getDefinition();
if (RD && !RD->getDeclName().getAsIdentifierInfo()) {
// decltype(((Foo_IMPL*)0)->bar) *
ObjCContainerDecl *CDecl =
dyn_cast<ObjCContainerDecl>(D->getDeclContext());
// ivar in class extensions requires special treatment.
if (ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(CDecl))
CDecl = CatDecl->getClassInterface();
std::string RecName = CDecl->getName();
RecName += "_IMPL";
RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get(RecName.c_str()));
QualType PtrStructIMPL = Context->getPointerType(Context->getTagDeclType(RD));
unsigned UnsignedIntSize =
static_cast<unsigned>(Context->getTypeSize(Context->UnsignedIntTy));
Expr *Zero = IntegerLiteral::Create(*Context,
llvm::APInt(UnsignedIntSize, 0),
Context->UnsignedIntTy, SourceLocation());
Zero = NoTypeInfoCStyleCastExpr(Context, PtrStructIMPL, CK_BitCast, Zero);
ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
Zero);
FieldDecl *FD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
SourceLocation(),
&Context->Idents.get(D->getNameAsString()),
IvarT, nullptr,
/*BitWidth=*/nullptr,
/*Mutable=*/true, ICIS_NoInit);
MemberExpr *ME = new (Context)
MemberExpr(PE, true, SourceLocation(), FD, SourceLocation(),
FD->getType(), VK_LValue, OK_Ordinary);
IvarT = Context->getDecltypeType(ME, ME->getType());
}
}
convertObjCTypeToCStyleType(IvarT);
QualType castT = Context->getPointerType(IvarT);
castExpr = NoTypeInfoCStyleCastExpr(Context,
castT,
CK_BitCast,
PE);
Expr *Exp = new (Context) UnaryOperator(castExpr, UO_Deref, IvarT,
VK_LValue, OK_Ordinary,
SourceLocation());
PE = new (Context) ParenExpr(OldRange.getBegin(),
OldRange.getEnd(),
Exp);
if (D->isBitField()) {
FieldDecl *FD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
SourceLocation(),
&Context->Idents.get(D->getNameAsString()),
D->getType(), nullptr,
/*BitWidth=*/D->getBitWidth(),
/*Mutable=*/true, ICIS_NoInit);
MemberExpr *ME = new (Context)
MemberExpr(PE, /*isArrow*/ false, SourceLocation(), FD,
SourceLocation(), FD->getType(), VK_LValue, OK_Ordinary);
Replacement = ME;
}
else
Replacement = PE;
}
ReplaceStmtWithRange(IV, Replacement, OldRange);
return Replacement;
}
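// The net effect of RewriteObjCIvarRefExpr, sketched informally: an ivar
// access such as 'obj->ivar' is rewritten to
//
//   (*(IvarT *)((char *)obj + IvarOffsetSymbol))
//
// where 'IvarOffsetSymbol' stands for the extern offset variable produced by
// WriteInternalIvarName (or ObjCIvarBitfieldGroupOffset for bitfields), and
// bitfield ivars additionally gain a member access into their grouping
// struct. The names here are descriptive placeholders, not emitted text.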
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/Frontend | repos/DirectXShaderCompiler/tools/clang/lib/Frontend/Rewrite/HTMLPrint.cpp | //===--- HTMLPrint.cpp - Source code -> HTML pretty-printing --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Pretty-printing of source code to HTML.
//
//===----------------------------------------------------------------------===//
#include "clang/Rewrite/Frontend/ASTConsumers.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Rewrite/Core/HTMLRewrite.h"
#include "clang/Rewrite/Core/Rewriter.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
//===----------------------------------------------------------------------===//
// Functional HTML pretty-printing.
//===----------------------------------------------------------------------===//
namespace {
class HTMLPrinter : public ASTConsumer {
Rewriter R;
raw_ostream *Out;
Preprocessor &PP;
bool SyntaxHighlight, HighlightMacros;
public:
HTMLPrinter(raw_ostream *OS, Preprocessor &pp,
bool _SyntaxHighlight, bool _HighlightMacros)
: Out(OS), PP(pp), SyntaxHighlight(_SyntaxHighlight),
HighlightMacros(_HighlightMacros) {}
void Initialize(ASTContext &context) override;
void HandleTranslationUnit(ASTContext &Ctx) override;
};
}
std::unique_ptr<ASTConsumer> clang::CreateHTMLPrinter(raw_ostream *OS,
Preprocessor &PP,
bool SyntaxHighlight,
bool HighlightMacros) {
return llvm::make_unique<HTMLPrinter>(OS, PP, SyntaxHighlight,
HighlightMacros);
}
void HTMLPrinter::Initialize(ASTContext &context) {
R.setSourceMgr(context.getSourceManager(), context.getLangOpts());
}
void HTMLPrinter::HandleTranslationUnit(ASTContext &Ctx) {
if (PP.getDiagnostics().hasErrorOccurred())
return;
// Format the file.
FileID FID = R.getSourceMgr().getMainFileID();
const FileEntry* Entry = R.getSourceMgr().getFileEntryForID(FID);
const char* Name;
// In some cases, in particular the case where the input is from stdin,
// there is no entry. Fall back to the memory buffer for a name in those
// cases.
if (Entry)
Name = Entry->getName();
else
Name = R.getSourceMgr().getBuffer(FID)->getBufferIdentifier();
html::AddLineNumbers(R, FID);
html::AddHeaderFooterInternalBuiltinCSS(R, FID, Name);
// If we have a preprocessor, relex the file and syntax highlight.
// We might not have a preprocessor if we come from a deserialized AST file,
// for example.
if (SyntaxHighlight) html::SyntaxHighlight(R, FID, PP);
if (HighlightMacros) html::HighlightMacros(R, FID, PP);
html::EscapeText(R, FID, false, true);
// Emit the HTML.
const RewriteBuffer &RewriteBuf = R.getEditBuffer(FID);
char *Buffer = new char[RewriteBuf.size()]; // HLSL Change: Use overridable operator new
std::copy(RewriteBuf.begin(), RewriteBuf.end(), Buffer);
Out->write(Buffer, RewriteBuf.size());
delete[] Buffer; // HLSL Change: Use overridable operator delete
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/Frontend | repos/DirectXShaderCompiler/tools/clang/lib/Frontend/Rewrite/FixItRewriter.cpp | //===--- FixItRewriter.cpp - Fix-It Rewriter Diagnostic Client --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is a diagnostic client adaptor that performs rewrites as
// suggested by code modification hints attached to diagnostics. It
// then forwards any diagnostics to the adapted diagnostic client.
//
//===----------------------------------------------------------------------===//
#include "clang/Rewrite/Frontend/FixItRewriter.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Edit/Commit.h"
#include "clang/Edit/EditsReceiver.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdio>
#include <memory>
using namespace clang;
FixItRewriter::FixItRewriter(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
const LangOptions &LangOpts,
FixItOptions *FixItOpts)
: Diags(Diags),
Editor(SourceMgr, LangOpts),
Rewrite(SourceMgr, LangOpts),
FixItOpts(FixItOpts),
NumFailures(0),
PrevDiagSilenced(false) {
Owner = Diags.takeClient();
Client = Diags.getClient();
Diags.setClient(this, false);
}
FixItRewriter::~FixItRewriter() {
Diags.setClient(Client, Owner.release() != nullptr);
}
bool FixItRewriter::WriteFixedFile(FileID ID, raw_ostream &OS) {
const RewriteBuffer *RewriteBuf = Rewrite.getRewriteBufferFor(ID);
if (!RewriteBuf) return true;
RewriteBuf->write(OS);
OS.flush();
return false;
}
namespace {
class RewritesReceiver : public edit::EditsReceiver {
Rewriter &Rewrite;
public:
RewritesReceiver(Rewriter &Rewrite) : Rewrite(Rewrite) { }
void insert(SourceLocation loc, StringRef text) override {
Rewrite.InsertText(loc, text);
}
void replace(CharSourceRange range, StringRef text) override {
Rewrite.ReplaceText(range.getBegin(), Rewrite.getRangeSize(range), text);
}
};
}
bool FixItRewriter::WriteFixedFiles(
std::vector<std::pair<std::string, std::string> > *RewrittenFiles) {
if (NumFailures > 0 && !FixItOpts->FixWhatYouCan) {
Diag(FullSourceLoc(), diag::warn_fixit_no_changes);
return true;
}
RewritesReceiver Rec(Rewrite);
Editor.applyRewrites(Rec);
if (FixItOpts->InPlace) {
// Overwriting open files on Windows is tricky, but the rewriter can do it
// for us.
Rewrite.overwriteChangedFiles();
return false;
}
for (iterator I = buffer_begin(), E = buffer_end(); I != E; ++I) {
const FileEntry *Entry = Rewrite.getSourceMgr().getFileEntryForID(I->first);
int fd;
std::string Filename = FixItOpts->RewriteFilename(Entry->getName(), fd);
std::error_code EC;
std::unique_ptr<llvm::raw_fd_ostream> OS;
if (fd != -1) {
OS.reset(new llvm::raw_fd_ostream(fd, /*shouldClose=*/true));
} else {
OS.reset(new llvm::raw_fd_ostream(Filename, EC, llvm::sys::fs::F_None));
}
if (EC) {
Diags.Report(clang::diag::err_fe_unable_to_open_output) << Filename
<< EC.message();
continue;
}
RewriteBuffer &RewriteBuf = I->second;
RewriteBuf.write(*OS);
OS->flush();
if (RewrittenFiles)
RewrittenFiles->push_back(std::make_pair(Entry->getName(), Filename));
}
return false;
}
bool FixItRewriter::IncludeInDiagnosticCounts() const {
return Client ? Client->IncludeInDiagnosticCounts() : true;
}
void FixItRewriter::HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
const Diagnostic &Info) {
// Default implementation (Warnings/errors count).
DiagnosticConsumer::HandleDiagnostic(DiagLevel, Info);
if (!FixItOpts->Silent ||
DiagLevel >= DiagnosticsEngine::Error ||
(DiagLevel == DiagnosticsEngine::Note && !PrevDiagSilenced) ||
(DiagLevel > DiagnosticsEngine::Note && Info.getNumFixItHints())) {
Client->HandleDiagnostic(DiagLevel, Info);
PrevDiagSilenced = false;
} else {
PrevDiagSilenced = true;
}
// Skip over any diagnostics that are ignored or notes.
if (DiagLevel <= DiagnosticsEngine::Note)
return;
// Skip over errors if we are only fixing warnings.
if (DiagLevel >= DiagnosticsEngine::Error && FixItOpts->FixOnlyWarnings) {
++NumFailures;
return;
}
  // Make sure that we can perform all of the modifications required
  // by this diagnostic.
edit::Commit commit(Editor);
for (unsigned Idx = 0, Last = Info.getNumFixItHints();
Idx < Last; ++Idx) {
const FixItHint &Hint = Info.getFixItHint(Idx);
if (Hint.CodeToInsert.empty()) {
if (Hint.InsertFromRange.isValid())
commit.insertFromRange(Hint.RemoveRange.getBegin(),
Hint.InsertFromRange, /*afterToken=*/false,
Hint.BeforePreviousInsertions);
else
commit.remove(Hint.RemoveRange);
} else {
if (Hint.RemoveRange.isTokenRange() ||
Hint.RemoveRange.getBegin() != Hint.RemoveRange.getEnd())
commit.replace(Hint.RemoveRange, Hint.CodeToInsert);
else
commit.insert(Hint.RemoveRange.getBegin(), Hint.CodeToInsert,
/*afterToken=*/false, Hint.BeforePreviousInsertions);
}
}
bool CanRewrite = Info.getNumFixItHints() > 0 && commit.isCommitable();
if (!CanRewrite) {
if (Info.getNumFixItHints() > 0)
Diag(Info.getLocation(), diag::note_fixit_in_macro);
// If this was an error, refuse to perform any rewriting.
if (DiagLevel >= DiagnosticsEngine::Error) {
if (++NumFailures == 1)
Diag(Info.getLocation(), diag::note_fixit_unfixed_error);
}
return;
}
if (!Editor.commit(commit)) {
++NumFailures;
Diag(Info.getLocation(), diag::note_fixit_failed);
return;
}
Diag(Info.getLocation(), diag::note_fixit_applied);
}
/// \brief Emit a diagnostic via the adapted diagnostic client.
void FixItRewriter::Diag(SourceLocation Loc, unsigned DiagID) {
// When producing this diagnostic, we temporarily bypass ourselves,
// clear out any current diagnostic, and let the downstream client
// format the diagnostic.
Diags.setClient(Client, false);
Diags.Clear();
Diags.Report(Loc, DiagID);
Diags.setClient(this, false);
}
FixItOptions::~FixItOptions() {}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Rewrite/CMakeLists.txt | set(LLVM_LINK_COMPONENTS
Support
)
add_clang_library(clangRewrite
DeltaTree.cpp
HTMLRewrite.cpp
RewriteRope.cpp
Rewriter.cpp
TokenRewriter.cpp
LINK_LIBS
clangBasic
clangLex
)
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Rewrite/DeltaTree.cpp | //===--- DeltaTree.cpp - B-Tree for Rewrite Delta tracking ----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the DeltaTree and related classes.
//
//===----------------------------------------------------------------------===//
#include "clang/Rewrite/Core/DeltaTree.h"
#include "clang/Basic/LLVM.h"
#include <cstdio>
#include <cstring>
using namespace clang;
/// The DeltaTree class is a multiway search tree (BTree) structure with some
/// fancy features. B-Trees are generally more memory and cache efficient
/// than binary trees, because they store multiple keys/values in each node.
///
/// DeltaTree implements a key/value mapping from FileIndex to Delta, allowing
/// fast lookup by FileIndex. However, an added (important) bonus is that it
/// can also efficiently tell us the full accumulated delta for a specific
/// file offset as well, without traversing the whole tree.
///
/// The nodes of the tree are made up of instances of two classes:
/// DeltaTreeNode and DeltaTreeInteriorNode. The latter subclasses the
/// former and adds child pointers. Each node knows the full delta of all
/// entries (recursively) contained inside of it, which allows us to get the
/// full delta implied by a whole subtree in constant time.
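///
/// A minimal usage sketch (illustrative only):
///
/// \code
/// DeltaTree T;
/// T.AddDelta(10, +5); // 5 bytes inserted at offset 10.
/// T.AddDelta(20, -3); // 3 bytes deleted at offset 20.
/// int D1 = T.getDeltaAt(15); // == 5: only the first delta lies before 15.
/// int D2 = T.getDeltaAt(25); // == 2: both deltas (5 - 3) lie before 25.
/// \endcode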
namespace {
/// SourceDelta - As code in the original input buffer is added and deleted,
/// SourceDelta records are used to keep track of how the input SourceLocation
/// object is mapped into the output buffer.
struct SourceDelta {
unsigned FileLoc;
int Delta;
static SourceDelta get(unsigned Loc, int D) {
SourceDelta Delta;
Delta.FileLoc = Loc;
Delta.Delta = D;
return Delta;
}
};
/// DeltaTreeNode - The common part of all nodes.
///
class DeltaTreeNode {
public:
struct InsertResult {
DeltaTreeNode *LHS, *RHS;
SourceDelta Split;
};
private:
friend class DeltaTreeInteriorNode;
/// WidthFactor - This controls the number of K/V slots held in the BTree:
/// how wide it is. Each level of the BTree is guaranteed to have at least
/// WidthFactor-1 K/V pairs (except the root) and may have at most
/// 2*WidthFactor-1 K/V pairs.
enum { WidthFactor = 8 };
  /// Values - This tracks the SourceDeltas currently in this node.
///
SourceDelta Values[2*WidthFactor-1];
/// NumValuesUsed - This tracks the number of values this node currently
/// holds.
unsigned char NumValuesUsed;
/// IsLeaf - This is true if this is a leaf of the btree. If false, this is
/// an interior node, and is actually an instance of DeltaTreeInteriorNode.
bool IsLeaf;
/// FullDelta - This is the full delta of all the values in this node and
/// all children nodes.
int FullDelta;
public:
DeltaTreeNode(bool isLeaf = true)
: NumValuesUsed(0), IsLeaf(isLeaf), FullDelta(0) {}
bool isLeaf() const { return IsLeaf; }
int getFullDelta() const { return FullDelta; }
bool isFull() const { return NumValuesUsed == 2*WidthFactor-1; }
unsigned getNumValuesUsed() const { return NumValuesUsed; }
const SourceDelta &getValue(unsigned i) const {
assert(i < NumValuesUsed && "Invalid value #");
return Values[i];
}
SourceDelta &getValue(unsigned i) {
assert(i < NumValuesUsed && "Invalid value #");
return Values[i];
}
/// DoInsertion - Do an insertion of the specified FileIndex/Delta pair into
/// this node. If insertion is easy, do it and return false. Otherwise,
/// split the node, populate InsertRes with info about the split, and return
/// true.
bool DoInsertion(unsigned FileIndex, int Delta, InsertResult *InsertRes);
void DoSplit(InsertResult &InsertRes);
/// RecomputeFullDeltaLocally - Recompute the FullDelta field by doing a
/// local walk over our contained deltas.
void RecomputeFullDeltaLocally();
void Destroy();
};
} // end anonymous namespace
namespace {
/// DeltaTreeInteriorNode - When isLeaf = false, a node has child pointers.
/// This class tracks them.
class DeltaTreeInteriorNode : public DeltaTreeNode {
DeltaTreeNode *Children[2*WidthFactor];
~DeltaTreeInteriorNode() {
for (unsigned i = 0, e = NumValuesUsed+1; i != e; ++i)
Children[i]->Destroy();
}
friend class DeltaTreeNode;
public:
DeltaTreeInteriorNode() : DeltaTreeNode(false /*nonleaf*/) {}
DeltaTreeInteriorNode(const InsertResult &IR)
: DeltaTreeNode(false /*nonleaf*/) {
Children[0] = IR.LHS;
Children[1] = IR.RHS;
Values[0] = IR.Split;
FullDelta = IR.LHS->getFullDelta()+IR.RHS->getFullDelta()+IR.Split.Delta;
NumValuesUsed = 1;
}
const DeltaTreeNode *getChild(unsigned i) const {
assert(i < getNumValuesUsed()+1 && "Invalid child");
return Children[i];
}
DeltaTreeNode *getChild(unsigned i) {
assert(i < getNumValuesUsed()+1 && "Invalid child");
return Children[i];
}
static inline bool classof(const DeltaTreeNode *N) { return !N->isLeaf(); }
};
}
/// Destroy - A 'virtual' destructor.
void DeltaTreeNode::Destroy() {
if (isLeaf())
delete this;
else
delete cast<DeltaTreeInteriorNode>(this);
}
/// RecomputeFullDeltaLocally - Recompute the FullDelta field by doing a
/// local walk over our contained deltas.
void DeltaTreeNode::RecomputeFullDeltaLocally() {
int NewFullDelta = 0;
for (unsigned i = 0, e = getNumValuesUsed(); i != e; ++i)
NewFullDelta += Values[i].Delta;
if (DeltaTreeInteriorNode *IN = dyn_cast<DeltaTreeInteriorNode>(this))
for (unsigned i = 0, e = getNumValuesUsed()+1; i != e; ++i)
NewFullDelta += IN->getChild(i)->getFullDelta();
FullDelta = NewFullDelta;
}
/// DoInsertion - Do an insertion of the specified FileIndex/Delta pair into
/// this node. If insertion is easy, do it and return false. Otherwise,
/// split the node, populate InsertRes with info about the split, and return
/// true.
bool DeltaTreeNode::DoInsertion(unsigned FileIndex, int Delta,
InsertResult *InsertRes) {
// Maintain full delta for this node.
FullDelta += Delta;
// Find the insertion point, the first delta whose index is >= FileIndex.
unsigned i = 0, e = getNumValuesUsed();
while (i != e && FileIndex > getValue(i).FileLoc)
++i;
  // If we found a record for exactly this file index, just merge this
// value into the pre-existing record and finish early.
if (i != e && getValue(i).FileLoc == FileIndex) {
// NOTE: Delta could drop to zero here. This means that the delta entry is
// useless and could be removed. Supporting erases is more complex than
// leaving an entry with Delta=0, so we just leave an entry with Delta=0 in
// the tree.
Values[i].Delta += Delta;
return false;
}
// Otherwise, we found an insertion point, and we know that the value at the
// specified index is > FileIndex. Handle the leaf case first.
if (isLeaf()) {
if (!isFull()) {
// For an insertion into a non-full leaf node, just insert the value in
// its sorted position. This requires moving later values over.
if (i != e)
memmove(&Values[i+1], &Values[i], sizeof(Values[0])*(e-i));
Values[i] = SourceDelta::get(FileIndex, Delta);
++NumValuesUsed;
return false;
}
    // Otherwise, if this leaf is full, split the node at its median, insert
// the value into one of the children, and return the result.
assert(InsertRes && "No result location specified");
DoSplit(*InsertRes);
if (InsertRes->Split.FileLoc > FileIndex)
InsertRes->LHS->DoInsertion(FileIndex, Delta, nullptr /*can't fail*/);
else
InsertRes->RHS->DoInsertion(FileIndex, Delta, nullptr /*can't fail*/);
return true;
}
// Otherwise, this is an interior node. Send the request down the tree.
DeltaTreeInteriorNode *IN = cast<DeltaTreeInteriorNode>(this);
if (!IN->Children[i]->DoInsertion(FileIndex, Delta, InsertRes))
return false; // If there was space in the child, just return.
// Okay, this split the subtree, producing a new value and two children to
// insert here. If this node is non-full, we can just insert it directly.
if (!isFull()) {
    // Now that we have two nodes and a new element, insert the percolated
    // value into ourselves by moving all the later values/children down, then
    // inserting the new one.
if (i != e)
memmove(&IN->Children[i+2], &IN->Children[i+1],
(e-i)*sizeof(IN->Children[0]));
IN->Children[i] = InsertRes->LHS;
IN->Children[i+1] = InsertRes->RHS;
if (e != i)
memmove(&Values[i+1], &Values[i], (e-i)*sizeof(Values[0]));
Values[i] = InsertRes->Split;
++NumValuesUsed;
return false;
}
// Finally, if this interior node was full and a node is percolated up, split
// ourself and return that up the chain. Start by saving all our info to
// avoid having the split clobber it.
IN->Children[i] = InsertRes->LHS;
DeltaTreeNode *SubRHS = InsertRes->RHS;
SourceDelta SubSplit = InsertRes->Split;
// Do the split.
DoSplit(*InsertRes);
// Figure out where to insert SubRHS/NewSplit.
DeltaTreeInteriorNode *InsertSide;
if (SubSplit.FileLoc < InsertRes->Split.FileLoc)
InsertSide = cast<DeltaTreeInteriorNode>(InsertRes->LHS);
else
InsertSide = cast<DeltaTreeInteriorNode>(InsertRes->RHS);
// We now have a non-empty interior node 'InsertSide' to insert
// SubRHS/SubSplit into. Find out where to insert SubSplit.
// Find the insertion point, the first delta whose index is >SubSplit.FileLoc.
i = 0; e = InsertSide->getNumValuesUsed();
while (i != e && SubSplit.FileLoc > InsertSide->getValue(i).FileLoc)
++i;
// Now we know that i is the place to insert the split value into. Insert it
// and the child right after it.
if (i != e)
memmove(&InsertSide->Children[i+2], &InsertSide->Children[i+1],
(e-i)*sizeof(IN->Children[0]));
InsertSide->Children[i+1] = SubRHS;
if (e != i)
memmove(&InsertSide->Values[i+1], &InsertSide->Values[i],
(e-i)*sizeof(Values[0]));
InsertSide->Values[i] = SubSplit;
++InsertSide->NumValuesUsed;
InsertSide->FullDelta += SubSplit.Delta + SubRHS->getFullDelta();
return true;
}
/// DoSplit - Split the currently full node (which has 2*WidthFactor-1 values)
/// into two subtrees each with "WidthFactor-1" values and a pivot value.
/// Return the pieces in InsertRes.
void DeltaTreeNode::DoSplit(InsertResult &InsertRes) {
assert(isFull() && "Why split a non-full node?");
// Since this node is full, it contains 2*WidthFactor-1 values. We move
// the first 'WidthFactor-1' values to the LHS child (which we leave in this
// node), propagate one value up, and move the last 'WidthFactor-1' values
// into the RHS child.
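  // For example, with WidthFactor = 8 a full node holds 15 values: indices
  // 0..6 stay in this node, index 7 becomes the pivot pushed up to the
  // parent, and indices 8..14 move into the new RHS node.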
// Create the new child node.
DeltaTreeNode *NewNode;
if (DeltaTreeInteriorNode *IN = dyn_cast<DeltaTreeInteriorNode>(this)) {
// If this is an interior node, also move over 'WidthFactor' children
// into the new node.
DeltaTreeInteriorNode *New = new DeltaTreeInteriorNode();
memcpy(&New->Children[0], &IN->Children[WidthFactor],
WidthFactor*sizeof(IN->Children[0]));
NewNode = New;
} else {
// Just create the new leaf node.
NewNode = new DeltaTreeNode();
}
// Move over the last 'WidthFactor-1' values from here to NewNode.
memcpy(&NewNode->Values[0], &Values[WidthFactor],
(WidthFactor-1)*sizeof(Values[0]));
// Decrease the number of values in the two nodes.
NewNode->NumValuesUsed = NumValuesUsed = WidthFactor-1;
// Recompute the two nodes' full delta.
NewNode->RecomputeFullDeltaLocally();
RecomputeFullDeltaLocally();
InsertRes.LHS = this;
InsertRes.RHS = NewNode;
InsertRes.Split = Values[WidthFactor-1];
}
//===----------------------------------------------------------------------===//
// DeltaTree Implementation
//===----------------------------------------------------------------------===//
//#define VERIFY_TREE
#ifdef VERIFY_TREE
/// VerifyTree - Walk the btree performing assertions on various properties to
/// verify consistency. This is useful for debugging new changes to the tree.
static void VerifyTree(const DeltaTreeNode *N) {
const DeltaTreeInteriorNode *IN = dyn_cast<DeltaTreeInteriorNode>(N);
  if (!IN) {
// Verify leaves, just ensure that FullDelta matches up and the elements
// are in proper order.
int FullDelta = 0;
for (unsigned i = 0, e = N->getNumValuesUsed(); i != e; ++i) {
if (i)
assert(N->getValue(i-1).FileLoc < N->getValue(i).FileLoc);
FullDelta += N->getValue(i).Delta;
}
assert(FullDelta == N->getFullDelta());
return;
}
// Verify interior nodes: Ensure that FullDelta matches up and the
// elements are in proper order and the children are in proper order.
int FullDelta = 0;
for (unsigned i = 0, e = IN->getNumValuesUsed(); i != e; ++i) {
const SourceDelta &IVal = N->getValue(i);
const DeltaTreeNode *IChild = IN->getChild(i);
if (i)
assert(IN->getValue(i-1).FileLoc < IVal.FileLoc);
FullDelta += IVal.Delta;
FullDelta += IChild->getFullDelta();
// The largest value in child #i should be smaller than FileLoc.
assert(IChild->getValue(IChild->getNumValuesUsed()-1).FileLoc <
IVal.FileLoc);
// The smallest value in child #i+1 should be larger than FileLoc.
assert(IN->getChild(i+1)->getValue(0).FileLoc > IVal.FileLoc);
VerifyTree(IChild);
}
FullDelta += IN->getChild(IN->getNumValuesUsed())->getFullDelta();
assert(FullDelta == N->getFullDelta());
}
#endif // VERIFY_TREE
static DeltaTreeNode *getRoot(void *Root) {
return (DeltaTreeNode*)Root;
}
DeltaTree::DeltaTree() {
Root = new DeltaTreeNode();
}
DeltaTree::DeltaTree(const DeltaTree &RHS) {
// Currently we only support copying when the RHS is empty.
assert(getRoot(RHS.Root)->getNumValuesUsed() == 0 &&
"Can only copy empty tree");
Root = new DeltaTreeNode();
}
DeltaTree::~DeltaTree() {
getRoot(Root)->Destroy();
}
/// getDeltaAt - Return the accumulated delta at the specified file offset.
/// This includes all insertions or deletions that occurred *before* the
/// specified file index.
int DeltaTree::getDeltaAt(unsigned FileIndex) const {
const DeltaTreeNode *Node = getRoot(Root);
int Result = 0;
// Walk down the tree.
while (1) {
// For all nodes, include any local deltas before the specified file
// index by summing them up directly. Keep track of how many were
// included.
unsigned NumValsGreater = 0;
for (unsigned e = Node->getNumValuesUsed(); NumValsGreater != e;
++NumValsGreater) {
const SourceDelta &Val = Node->getValue(NumValsGreater);
if (Val.FileLoc >= FileIndex)
break;
Result += Val.Delta;
}
// If we have an interior node, include information about children and
// recurse. Otherwise, if we have a leaf, we're done.
const DeltaTreeInteriorNode *IN = dyn_cast<DeltaTreeInteriorNode>(Node);
if (!IN) return Result;
// Include any children to the left of the values we skipped, all of
// their deltas should be included as well.
for (unsigned i = 0; i != NumValsGreater; ++i)
Result += IN->getChild(i)->getFullDelta();
// If we found exactly the value we were looking for, break off the
// search early. There is no need to search the RHS of the value for
// partial results.
if (NumValsGreater != Node->getNumValuesUsed() &&
Node->getValue(NumValsGreater).FileLoc == FileIndex)
return Result+IN->getChild(NumValsGreater)->getFullDelta();
// Otherwise, traverse down the tree. The selected subtree may be
// partially included in the range.
Node = IN->getChild(NumValsGreater);
}
// NOT REACHED.
}
/// AddDelta - When a change is made that shifts around the text buffer,
/// this method is used to record that info. It inserts a delta of 'Delta'
/// into the current DeltaTree at offset FileIndex.
void DeltaTree::AddDelta(unsigned FileIndex, int Delta) {
assert(Delta && "Adding a noop?");
DeltaTreeNode *MyRoot = getRoot(Root);
DeltaTreeNode::InsertResult InsertRes;
if (MyRoot->DoInsertion(FileIndex, Delta, &InsertRes)) {
Root = MyRoot = new DeltaTreeInteriorNode(InsertRes);
}
#ifdef VERIFY_TREE
VerifyTree(MyRoot);
#endif
}
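// Example: a minimal usage sketch of the two methods above. The offsets and
// deltas are hypothetical; they only illustrate that getDeltaAt accumulates
// deltas recorded strictly *before* the queried file index:
//
// \code
//   DeltaTree DT;
//   DT.AddDelta(10, +5);         // 5 bytes inserted at file offset 10.
//   DT.AddDelta(20, -3);         // 3 bytes removed at file offset 20.
//   int A = DT.getDeltaAt(15);   // == 5: only the delta at 10 precedes 15.
//   int B = DT.getDeltaAt(25);   // == 2: both deltas accumulate (+5 - 3).
// \endcode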
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Rewrite/TokenRewriter.cpp | //===--- TokenRewriter.cpp - Token-based code rewriting interface ---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TokenRewriter class, which is used for code
// transformations.
//
//===----------------------------------------------------------------------===//
#include "clang/Rewrite/Core/TokenRewriter.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
#include "clang/Lex/ScratchBuffer.h"
using namespace clang;
TokenRewriter::TokenRewriter(FileID FID, SourceManager &SM,
const LangOptions &LangOpts) {
ScratchBuf.reset(new ScratchBuffer(SM));
// Create a lexer to lex all the tokens of the main file in raw mode.
const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
Lexer RawLex(FID, FromFile, SM, LangOpts);
// Return all comments and whitespace as tokens.
RawLex.SetKeepWhitespaceMode(true);
  // Lex the file, populating our data structures.
Token RawTok;
RawLex.LexFromRawLexer(RawTok);
while (RawTok.isNot(tok::eof)) {
#if 0
    if (RawTok.is(tok::raw_identifier)) {
      // Look up the identifier info for the token. This should use
      // IdentifierTable directly instead of PP.
      PP.LookUpIdentifierInfo(RawTok);
    }
#endif
AddToken(RawTok, TokenList.end());
RawLex.LexFromRawLexer(RawTok);
}
}
TokenRewriter::~TokenRewriter() {
}
/// RemapIterator - Convert from token_iterator (a const iterator) to
/// TokenRefTy (a non-const iterator).
TokenRewriter::TokenRefTy TokenRewriter::RemapIterator(token_iterator I) {
if (I == token_end()) return TokenList.end();
// FIXME: This is horrible, we should use our own list or something to avoid
// this.
std::map<SourceLocation, TokenRefTy>::iterator MapIt =
TokenAtLoc.find(I->getLocation());
assert(MapIt != TokenAtLoc.end() && "iterator not in rewriter?");
return MapIt->second;
}
/// AddToken - Add the specified token into the Rewriter before the other
/// position.
TokenRewriter::TokenRefTy
TokenRewriter::AddToken(const Token &T, TokenRefTy Where) {
Where = TokenList.insert(Where, T);
bool InsertSuccess = TokenAtLoc.insert(std::make_pair(T.getLocation(),
Where)).second;
assert(InsertSuccess && "Token location already in rewriter!");
(void)InsertSuccess;
return Where;
}
TokenRewriter::token_iterator
TokenRewriter::AddTokenBefore(token_iterator I, const char *Val) {
unsigned Len = strlen(Val);
// Plop the string into the scratch buffer, then create a token for this
// string.
Token Tok;
Tok.startToken();
const char *Spelling;
Tok.setLocation(ScratchBuf->getToken(Val, Len, Spelling));
Tok.setLength(Len);
// TODO: Form a whole lexer around this and relex the token! For now, just
// set kind to tok::unknown.
Tok.setKind(tok::unknown);
return AddToken(Tok, RemapIterator(I));
}
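// Example: a hedged sketch of typical use; it assumes the token_begin()
// accessor declared in TokenRewriter.h, and FID/SM/LangOpts come from the
// surrounding compiler setup:
//
// \code
//   TokenRewriter TR(FID, SM, LangOpts);
//   // Insert a new tok::unknown token spelled "/*note*/" before the first
//   // token of the file.
//   TR.AddTokenBefore(TR.token_begin(), "/*note*/");
// \endcode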
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Rewrite/RewriteRope.cpp | //===--- RewriteRope.cpp - Rope specialized for rewriter --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the RewriteRope class, a rope data structure
// specialized for efficient insertions and deletions in large strings.
//
//===----------------------------------------------------------------------===//
#include "clang/Rewrite/Core/RewriteRope.h"
#include "clang/Basic/LLVM.h"
#include <algorithm>
using namespace clang;
/// RewriteRope is a "strong" string class, designed to make insertions and
/// deletions in the middle of the string nearly constant time (really, they are
/// O(log N), but with a very low constant factor).
///
/// The implementation of this data structure is a conceptual linear sequence of
/// RopePiece elements. Each RopePiece represents a view on a separately
/// allocated and reference counted string. This means that splitting a very
/// long string can be done in constant time by splitting a RopePiece that
/// references the whole string into two rope pieces that reference each half.
/// Once split, another string can be inserted in between the two halves by
/// inserting a RopePiece in between the two others. All of this is very
/// inexpensive: it takes time proportional to the number of RopePieces, not the
/// length of the strings they represent.
///
/// While a linear sequence of RopePieces is the conceptual model, the actual
/// implementation captures them in an adapted B+ Tree.  Using a B+ tree (a
/// tree that keeps the values in the leaves, where each node contains a
/// reasonable number of pointers to children/values) allows us to
/// maintain efficient operation when the RewriteRope contains a *huge* number
/// of RopePieces. The basic idea of the B+ Tree is that it allows us to find
/// the RopePiece corresponding to some offset very efficiently, and it
/// automatically balances itself on insertions of RopePieces (which can happen
/// for both insertions and erases of string ranges).
///
/// The one wrinkle on the theory is that we don't attempt to keep the tree
/// properly balanced when erases happen.  Erasing string data can either insert
/// new RopePieces (e.g. when the middle of some other rope piece is deleted,
/// which results in two rope pieces, just like an insert) or it can
/// reduce the number of RopePieces maintained by the B+Tree. In the case when
/// the number of RopePieces is reduced, we don't attempt to maintain the
/// standard 'invariant' that each node in the tree contains at least
/// 'WidthFactor' children/values. For our use cases, this doesn't seem to
/// matter.
///
/// The implementation below is primarily implemented in terms of three classes:
/// RopePieceBTreeNode - Common base class for:
///
/// RopePieceBTreeLeaf - Directly manages up to '2*WidthFactor' RopePiece
/// nodes. This directly represents a chunk of the string with those
/// RopePieces concatenated.
/// RopePieceBTreeInterior - An interior node in the B+ Tree, which manages
/// up to '2*WidthFactor' other nodes in the tree.
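///
/// For example, here is an illustrative sketch using the public RewriteRope
/// interface declared in RewriteRope.h (the strings are hypothetical):
///
/// \code
///   const char Hello[] = "hello world";
///   const char Extra[] = ", brave";
///   RewriteRope Rope;
///   Rope.assign(Hello, Hello + 11);     // one piece:    [hello world]
///   Rope.insert(5, Extra, Extra + 7);   // three pieces: [hello][, brave][ world]
///   Rope.erase(0, 5);                   // two pieces:   [, brave][ world]
/// \endcode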
//===----------------------------------------------------------------------===//
// RopePieceBTreeNode Class
//===----------------------------------------------------------------------===//
namespace {
/// RopePieceBTreeNode - Common base class of RopePieceBTreeLeaf and
/// RopePieceBTreeInterior. This provides some 'virtual' dispatching methods
  /// and a flag that determines which subclass the instance is.  Importantly,
  /// this node knows the full extent of the node, including any
/// children that it has. This allows efficient skipping over entire subtrees
/// when looking for an offset in the BTree.
class RopePieceBTreeNode {
protected:
/// WidthFactor - This controls the number of K/V slots held in the BTree:
/// how wide it is. Each level of the BTree is guaranteed to have at least
    /// 'WidthFactor' elements in it (either rope pieces or children), except
    /// the root, which may have fewer, and may have at most 2*WidthFactor
/// elements.
enum { WidthFactor = 8 };
/// Size - This is the number of bytes of file this node (including any
/// potential children) covers.
unsigned Size;
/// IsLeaf - True if this is an instance of RopePieceBTreeLeaf, false if it
/// is an instance of RopePieceBTreeInterior.
bool IsLeaf;
RopePieceBTreeNode(bool isLeaf) : Size(0), IsLeaf(isLeaf) {}
~RopePieceBTreeNode() = default;
public:
bool isLeaf() const { return IsLeaf; }
unsigned size() const { return Size; }
void Destroy();
/// split - Split the range containing the specified offset so that we are
/// guaranteed that there is a place to do an insertion at the specified
/// offset. The offset is relative, so "0" is the start of the node.
///
/// If there is no space in this subtree for the extra piece, the extra tree
/// node is returned and must be inserted into a parent.
RopePieceBTreeNode *split(unsigned Offset);
/// insert - Insert the specified ropepiece into this tree node at the
/// specified offset. The offset is relative, so "0" is the start of the
/// node.
///
/// If there is no space in this subtree for the extra piece, the extra tree
/// node is returned and must be inserted into a parent.
RopePieceBTreeNode *insert(unsigned Offset, const RopePiece &R);
/// erase - Remove NumBytes from this node at the specified offset. We are
/// guaranteed that there is a split at Offset.
void erase(unsigned Offset, unsigned NumBytes);
};
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// RopePieceBTreeLeaf Class
//===----------------------------------------------------------------------===//
namespace {
/// RopePieceBTreeLeaf - Directly manages up to '2*WidthFactor' RopePiece
/// nodes. This directly represents a chunk of the string with those
  /// RopePieces concatenated. Since this is a B+Tree, all values (in this case
/// instances of RopePiece) are stored in leaves like this. To make iteration
/// over the leaves efficient, they maintain a singly linked list through the
/// NextLeaf field. This allows the B+Tree forward iterator to be constant
/// time for all increments.
class RopePieceBTreeLeaf : public RopePieceBTreeNode {
/// NumPieces - This holds the number of rope pieces currently active in the
/// Pieces array.
unsigned char NumPieces;
/// Pieces - This tracks the file chunks currently in this leaf.
///
RopePiece Pieces[2*WidthFactor];
    /// PrevLeaf/NextLeaf - These thread the leaves together in order, allowing
    /// efficient in-order forward iteration of the tree without traversal.
    /// PrevLeaf points at the previous leaf's NextLeaf slot, which makes
    /// unlinking this leaf O(1).
RopePieceBTreeLeaf **PrevLeaf, *NextLeaf;
public:
RopePieceBTreeLeaf() : RopePieceBTreeNode(true), NumPieces(0),
PrevLeaf(nullptr), NextLeaf(nullptr) {}
~RopePieceBTreeLeaf() {
if (PrevLeaf || NextLeaf)
removeFromLeafInOrder();
clear();
}
bool isFull() const { return NumPieces == 2*WidthFactor; }
/// clear - Remove all rope pieces from this leaf.
void clear() {
while (NumPieces)
Pieces[--NumPieces] = RopePiece();
Size = 0;
}
unsigned getNumPieces() const { return NumPieces; }
const RopePiece &getPiece(unsigned i) const {
assert(i < getNumPieces() && "Invalid piece ID");
return Pieces[i];
}
const RopePieceBTreeLeaf *getNextLeafInOrder() const { return NextLeaf; }
void insertAfterLeafInOrder(RopePieceBTreeLeaf *Node) {
assert(!PrevLeaf && !NextLeaf && "Already in ordering");
NextLeaf = Node->NextLeaf;
if (NextLeaf)
NextLeaf->PrevLeaf = &NextLeaf;
PrevLeaf = &Node->NextLeaf;
Node->NextLeaf = this;
}
void removeFromLeafInOrder() {
if (PrevLeaf) {
*PrevLeaf = NextLeaf;
if (NextLeaf)
NextLeaf->PrevLeaf = PrevLeaf;
} else if (NextLeaf) {
NextLeaf->PrevLeaf = nullptr;
}
}
/// FullRecomputeSizeLocally - This method recomputes the 'Size' field by
/// summing the size of all RopePieces.
void FullRecomputeSizeLocally() {
Size = 0;
for (unsigned i = 0, e = getNumPieces(); i != e; ++i)
Size += getPiece(i).size();
}
/// split - Split the range containing the specified offset so that we are
/// guaranteed that there is a place to do an insertion at the specified
/// offset. The offset is relative, so "0" is the start of the node.
///
/// If there is no space in this subtree for the extra piece, the extra tree
/// node is returned and must be inserted into a parent.
RopePieceBTreeNode *split(unsigned Offset);
/// insert - Insert the specified ropepiece into this tree node at the
/// specified offset. The offset is relative, so "0" is the start of the
/// node.
///
/// If there is no space in this subtree for the extra piece, the extra tree
/// node is returned and must be inserted into a parent.
RopePieceBTreeNode *insert(unsigned Offset, const RopePiece &R);
/// erase - Remove NumBytes from this node at the specified offset. We are
/// guaranteed that there is a split at Offset.
void erase(unsigned Offset, unsigned NumBytes);
static inline bool classof(const RopePieceBTreeNode *N) {
return N->isLeaf();
}
};
} // end anonymous namespace
/// split - Split the range containing the specified offset so that we are
/// guaranteed that there is a place to do an insertion at the specified
/// offset. The offset is relative, so "0" is the start of the node.
///
/// If there is no space in this subtree for the extra piece, the extra tree
/// node is returned and must be inserted into a parent.
RopePieceBTreeNode *RopePieceBTreeLeaf::split(unsigned Offset) {
// Find the insertion point. We are guaranteed that there is a split at the
// specified offset so find it.
if (Offset == 0 || Offset == size()) {
    // Fastpath for a common case. There is already a split point at the end.
return nullptr;
}
// Find the piece that this offset lands in.
unsigned PieceOffs = 0;
unsigned i = 0;
while (Offset >= PieceOffs+Pieces[i].size()) {
PieceOffs += Pieces[i].size();
++i;
}
// If there is already a split point at the specified offset, just return
// success.
if (PieceOffs == Offset)
return nullptr;
// Otherwise, we need to split piece 'i' at Offset-PieceOffs. Convert Offset
// to being Piece relative.
unsigned IntraPieceOffset = Offset-PieceOffs;
// We do this by shrinking the RopePiece and then doing an insert of the tail.
RopePiece Tail(Pieces[i].StrData, Pieces[i].StartOffs+IntraPieceOffset,
Pieces[i].EndOffs);
Size -= Pieces[i].size();
Pieces[i].EndOffs = Pieces[i].StartOffs+IntraPieceOffset;
Size += Pieces[i].size();
return insert(Offset, Tail);
}
/// insert - Insert the specified RopePiece into this tree node at the
/// specified offset. The offset is relative, so "0" is the start of the node.
///
/// If there is no space in this subtree for the extra piece, the extra tree
/// node is returned and must be inserted into a parent.
RopePieceBTreeNode *RopePieceBTreeLeaf::insert(unsigned Offset,
const RopePiece &R) {
// If this node is not full, insert the piece.
if (!isFull()) {
// Find the insertion point. We are guaranteed that there is a split at the
// specified offset so find it.
unsigned i = 0, e = getNumPieces();
if (Offset == size()) {
// Fastpath for a common case.
i = e;
} else {
unsigned SlotOffs = 0;
for (; Offset > SlotOffs; ++i)
SlotOffs += getPiece(i).size();
assert(SlotOffs == Offset && "Split didn't occur before insertion!");
}
// For an insertion into a non-full leaf node, just insert the value in
// its sorted position. This requires moving later values over.
for (; i != e; --e)
Pieces[e] = Pieces[e-1];
Pieces[i] = R;
++NumPieces;
Size += R.size();
return nullptr;
}
  // Otherwise, this leaf is full: split it into two halves. Since this
// node is full, it contains 2*WidthFactor values. We move the first
// 'WidthFactor' values to the LHS child (which we leave in this node) and
// move the last 'WidthFactor' values into the RHS child.
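  // For example, with WidthFactor == 8 a full leaf holds pieces P0..P15;
  // after the split this node keeps [P0..P7] and the returned node holds
  // [P8..P15].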
// Create the new node.
RopePieceBTreeLeaf *NewNode = new RopePieceBTreeLeaf();
// Move over the last 'WidthFactor' values from here to NewNode.
std::copy(&Pieces[WidthFactor], &Pieces[2*WidthFactor],
&NewNode->Pieces[0]);
// Replace old pieces with null RopePieces to drop refcounts.
std::fill(&Pieces[WidthFactor], &Pieces[2*WidthFactor], RopePiece());
// Decrease the number of values in the two nodes.
NewNode->NumPieces = NumPieces = WidthFactor;
// Recompute the two nodes' size.
NewNode->FullRecomputeSizeLocally();
FullRecomputeSizeLocally();
// Update the list of leaves.
NewNode->insertAfterLeafInOrder(this);
// These insertions can't fail.
if (this->size() >= Offset)
this->insert(Offset, R);
else
NewNode->insert(Offset - this->size(), R);
return NewNode;
}
/// erase - Remove NumBytes from this node at the specified offset. We are
/// guaranteed that there is a split at Offset.
void RopePieceBTreeLeaf::erase(unsigned Offset, unsigned NumBytes) {
// Since we are guaranteed that there is a split at Offset, we start by
// finding the Piece that starts there.
unsigned PieceOffs = 0;
unsigned i = 0;
for (; Offset > PieceOffs; ++i)
PieceOffs += getPiece(i).size();
assert(PieceOffs == Offset && "Split didn't occur before erase!");
unsigned StartPiece = i;
// Figure out how many pieces completely cover 'NumBytes'. We want to remove
// all of them.
for (; Offset+NumBytes > PieceOffs+getPiece(i).size(); ++i)
PieceOffs += getPiece(i).size();
// If we exactly include the last one, include it in the region to delete.
if (Offset+NumBytes == PieceOffs+getPiece(i).size())
PieceOffs += getPiece(i).size(), ++i;
// If we completely cover some RopePieces, erase them now.
if (i != StartPiece) {
unsigned NumDeleted = i-StartPiece;
for (; i != getNumPieces(); ++i)
Pieces[i-NumDeleted] = Pieces[i];
// Drop references to dead rope pieces.
std::fill(&Pieces[getNumPieces()-NumDeleted], &Pieces[getNumPieces()],
RopePiece());
NumPieces -= NumDeleted;
unsigned CoverBytes = PieceOffs-Offset;
NumBytes -= CoverBytes;
Size -= CoverBytes;
}
  // If we completely removed some stuff, we may be done.
if (NumBytes == 0) return;
  // Okay, we might now be erasing part of some Piece. If this is the case, then
// move the start point of the piece.
assert(getPiece(StartPiece).size() > NumBytes);
Pieces[StartPiece].StartOffs += NumBytes;
// The size of this node just shrunk by NumBytes.
Size -= NumBytes;
}
//===----------------------------------------------------------------------===//
// RopePieceBTreeInterior Class
//===----------------------------------------------------------------------===//
namespace {
/// RopePieceBTreeInterior - This represents an interior node in the B+Tree,
/// which holds up to 2*WidthFactor pointers to child nodes.
class RopePieceBTreeInterior : public RopePieceBTreeNode {
/// NumChildren - This holds the number of children currently active in the
/// Children array.
unsigned char NumChildren;
RopePieceBTreeNode *Children[2*WidthFactor];
public:
RopePieceBTreeInterior() : RopePieceBTreeNode(false), NumChildren(0) {}
RopePieceBTreeInterior(RopePieceBTreeNode *LHS, RopePieceBTreeNode *RHS)
: RopePieceBTreeNode(false) {
Children[0] = LHS;
Children[1] = RHS;
NumChildren = 2;
Size = LHS->size() + RHS->size();
}
~RopePieceBTreeInterior() {
for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
Children[i]->Destroy();
}
bool isFull() const { return NumChildren == 2*WidthFactor; }
unsigned getNumChildren() const { return NumChildren; }
const RopePieceBTreeNode *getChild(unsigned i) const {
assert(i < NumChildren && "invalid child #");
return Children[i];
}
RopePieceBTreeNode *getChild(unsigned i) {
assert(i < NumChildren && "invalid child #");
return Children[i];
}
/// FullRecomputeSizeLocally - Recompute the Size field of this node by
/// summing up the sizes of the child nodes.
void FullRecomputeSizeLocally() {
Size = 0;
for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
Size += getChild(i)->size();
}
/// split - Split the range containing the specified offset so that we are
/// guaranteed that there is a place to do an insertion at the specified
/// offset. The offset is relative, so "0" is the start of the node.
///
/// If there is no space in this subtree for the extra piece, the extra tree
/// node is returned and must be inserted into a parent.
RopePieceBTreeNode *split(unsigned Offset);
/// insert - Insert the specified ropepiece into this tree node at the
/// specified offset. The offset is relative, so "0" is the start of the
/// node.
///
/// If there is no space in this subtree for the extra piece, the extra tree
/// node is returned and must be inserted into a parent.
RopePieceBTreeNode *insert(unsigned Offset, const RopePiece &R);
/// HandleChildPiece - A child propagated an insertion result up to us.
/// Insert the new child, and/or propagate the result further up the tree.
RopePieceBTreeNode *HandleChildPiece(unsigned i, RopePieceBTreeNode *RHS);
/// erase - Remove NumBytes from this node at the specified offset. We are
/// guaranteed that there is a split at Offset.
void erase(unsigned Offset, unsigned NumBytes);
static inline bool classof(const RopePieceBTreeNode *N) {
return !N->isLeaf();
}
};
} // end anonymous namespace
/// split - Split the range containing the specified offset so that we are
/// guaranteed that there is a place to do an insertion at the specified
/// offset. The offset is relative, so "0" is the start of the node.
///
/// If there is no space in this subtree for the extra piece, the extra tree
/// node is returned and must be inserted into a parent.
RopePieceBTreeNode *RopePieceBTreeInterior::split(unsigned Offset) {
// Figure out which child to split.
if (Offset == 0 || Offset == size())
return nullptr; // If we have an exact offset, we're already split.
unsigned ChildOffset = 0;
unsigned i = 0;
for (; Offset >= ChildOffset+getChild(i)->size(); ++i)
ChildOffset += getChild(i)->size();
// If already split there, we're done.
if (ChildOffset == Offset)
return nullptr;
// Otherwise, recursively split the child.
if (RopePieceBTreeNode *RHS = getChild(i)->split(Offset-ChildOffset))
return HandleChildPiece(i, RHS);
return nullptr; // Done!
}
/// insert - Insert the specified ropepiece into this tree node at the
/// specified offset. The offset is relative, so "0" is the start of the
/// node.
///
/// If there is no space in this subtree for the extra piece, the extra tree
/// node is returned and must be inserted into a parent.
RopePieceBTreeNode *RopePieceBTreeInterior::insert(unsigned Offset,
const RopePiece &R) {
// Find the insertion point. We are guaranteed that there is a split at the
// specified offset so find it.
unsigned i = 0, e = getNumChildren();
unsigned ChildOffs = 0;
if (Offset == size()) {
// Fastpath for a common case. Insert at end of last child.
i = e-1;
ChildOffs = size()-getChild(i)->size();
} else {
for (; Offset > ChildOffs+getChild(i)->size(); ++i)
ChildOffs += getChild(i)->size();
}
Size += R.size();
// Insert at the end of this child.
if (RopePieceBTreeNode *RHS = getChild(i)->insert(Offset-ChildOffs, R))
return HandleChildPiece(i, RHS);
return nullptr;
}
/// HandleChildPiece - A child propagated an insertion result up to us.
/// Insert the new child, and/or propagate the result further up the tree.
RopePieceBTreeNode *
RopePieceBTreeInterior::HandleChildPiece(unsigned i, RopePieceBTreeNode *RHS) {
  // The child propagated a subtree up to us as a new child. See if
// we have space for it here.
if (!isFull()) {
// Insert RHS after child 'i'.
if (i + 1 != getNumChildren())
memmove(&Children[i+2], &Children[i+1],
(getNumChildren()-i-1)*sizeof(Children[0]));
Children[i+1] = RHS;
++NumChildren;
return nullptr;
}
// Okay, this node is full. Split it in half, moving WidthFactor children to
// a newly allocated interior node.
// Create the new node.
RopePieceBTreeInterior *NewNode = new RopePieceBTreeInterior();
// Move over the last 'WidthFactor' values from here to NewNode.
memcpy(&NewNode->Children[0], &Children[WidthFactor],
WidthFactor*sizeof(Children[0]));
// Decrease the number of values in the two nodes.
NewNode->NumChildren = NumChildren = WidthFactor;
  // Finally, insert the new child into the side that can (now) hold it.
// These insertions can't fail.
if (i < WidthFactor)
this->HandleChildPiece(i, RHS);
else
NewNode->HandleChildPiece(i-WidthFactor, RHS);
// Recompute the two nodes' size.
NewNode->FullRecomputeSizeLocally();
FullRecomputeSizeLocally();
return NewNode;
}
/// erase - Remove NumBytes from this node at the specified offset. We are
/// guaranteed that there is a split at Offset.
void RopePieceBTreeInterior::erase(unsigned Offset, unsigned NumBytes) {
// This will shrink this node by NumBytes.
Size -= NumBytes;
// Find the first child that overlaps with Offset.
unsigned i = 0;
for (; Offset >= getChild(i)->size(); ++i)
Offset -= getChild(i)->size();
// Propagate the delete request into overlapping children, or completely
// delete the children as appropriate.
while (NumBytes) {
RopePieceBTreeNode *CurChild = getChild(i);
// If we are deleting something contained entirely in the child, pass on the
// request.
if (Offset+NumBytes < CurChild->size()) {
CurChild->erase(Offset, NumBytes);
return;
}
// If this deletion request starts somewhere in the middle of the child, it
// must be deleting to the end of the child.
if (Offset) {
unsigned BytesFromChild = CurChild->size()-Offset;
CurChild->erase(Offset, BytesFromChild);
NumBytes -= BytesFromChild;
// Start at the beginning of the next child.
Offset = 0;
++i;
continue;
}
// If the deletion request completely covers the child, delete it and move
// the rest down.
NumBytes -= CurChild->size();
CurChild->Destroy();
--NumChildren;
if (i != getNumChildren())
memmove(&Children[i], &Children[i+1],
(getNumChildren()-i)*sizeof(Children[0]));
}
}
//===----------------------------------------------------------------------===//
// RopePieceBTreeNode Implementation
//===----------------------------------------------------------------------===//
void RopePieceBTreeNode::Destroy() {
if (RopePieceBTreeLeaf *Leaf = dyn_cast<RopePieceBTreeLeaf>(this))
delete Leaf;
else
delete cast<RopePieceBTreeInterior>(this);
}
/// split - Split the range containing the specified offset so that we are
/// guaranteed that there is a place to do an insertion at the specified
/// offset. The offset is relative, so "0" is the start of the node.
///
/// If there is no space in this subtree for the extra piece, the extra tree
/// node is returned and must be inserted into a parent.
RopePieceBTreeNode *RopePieceBTreeNode::split(unsigned Offset) {
assert(Offset <= size() && "Invalid offset to split!");
if (RopePieceBTreeLeaf *Leaf = dyn_cast<RopePieceBTreeLeaf>(this))
return Leaf->split(Offset);
return cast<RopePieceBTreeInterior>(this)->split(Offset);
}
/// insert - Insert the specified ropepiece into this tree node at the
/// specified offset. The offset is relative, so "0" is the start of the
/// node.
///
/// If there is no space in this subtree for the extra piece, the extra tree
/// node is returned and must be inserted into a parent.
RopePieceBTreeNode *RopePieceBTreeNode::insert(unsigned Offset,
const RopePiece &R) {
assert(Offset <= size() && "Invalid offset to insert!");
if (RopePieceBTreeLeaf *Leaf = dyn_cast<RopePieceBTreeLeaf>(this))
return Leaf->insert(Offset, R);
return cast<RopePieceBTreeInterior>(this)->insert(Offset, R);
}
/// erase - Remove NumBytes from this node at the specified offset. We are
/// guaranteed that there is a split at Offset.
void RopePieceBTreeNode::erase(unsigned Offset, unsigned NumBytes) {
assert(Offset+NumBytes <= size() && "Invalid offset to erase!");
if (RopePieceBTreeLeaf *Leaf = dyn_cast<RopePieceBTreeLeaf>(this))
return Leaf->erase(Offset, NumBytes);
return cast<RopePieceBTreeInterior>(this)->erase(Offset, NumBytes);
}
//===----------------------------------------------------------------------===//
// RopePieceBTreeIterator Implementation
//===----------------------------------------------------------------------===//
static const RopePieceBTreeLeaf *getCN(const void *P) {
return static_cast<const RopePieceBTreeLeaf*>(P);
}
// begin iterator.
RopePieceBTreeIterator::RopePieceBTreeIterator(const void *n) {
const RopePieceBTreeNode *N = static_cast<const RopePieceBTreeNode*>(n);
// Walk down the left side of the tree until we get to a leaf.
while (const RopePieceBTreeInterior *IN = dyn_cast<RopePieceBTreeInterior>(N))
N = IN->getChild(0);
// We must have at least one leaf.
CurNode = cast<RopePieceBTreeLeaf>(N);
// If we found a leaf that happens to be empty, skip over it until we get
// to something full.
while (CurNode && getCN(CurNode)->getNumPieces() == 0)
CurNode = getCN(CurNode)->getNextLeafInOrder();
if (CurNode)
CurPiece = &getCN(CurNode)->getPiece(0);
else // Empty tree, this is an end() iterator.
CurPiece = nullptr;
CurChar = 0;
}
void RopePieceBTreeIterator::MoveToNextPiece() {
if (CurPiece != &getCN(CurNode)->getPiece(getCN(CurNode)->getNumPieces()-1)) {
CurChar = 0;
++CurPiece;
return;
}
// Find the next non-empty leaf node.
do
CurNode = getCN(CurNode)->getNextLeafInOrder();
while (CurNode && getCN(CurNode)->getNumPieces() == 0);
if (CurNode)
CurPiece = &getCN(CurNode)->getPiece(0);
else // Hit end().
CurPiece = nullptr;
CurChar = 0;
}
//===----------------------------------------------------------------------===//
// RopePieceBTree Implementation
//===----------------------------------------------------------------------===//
static RopePieceBTreeNode *getRoot(void *P) {
return static_cast<RopePieceBTreeNode*>(P);
}
RopePieceBTree::RopePieceBTree() {
Root = new RopePieceBTreeLeaf();
}
RopePieceBTree::RopePieceBTree(const RopePieceBTree &RHS) {
assert(RHS.empty() && "Can't copy non-empty tree yet");
Root = new RopePieceBTreeLeaf();
}
RopePieceBTree::~RopePieceBTree() {
getRoot(Root)->Destroy();
}
unsigned RopePieceBTree::size() const {
return getRoot(Root)->size();
}
void RopePieceBTree::clear() {
if (RopePieceBTreeLeaf *Leaf = dyn_cast<RopePieceBTreeLeaf>(getRoot(Root)))
Leaf->clear();
else {
getRoot(Root)->Destroy();
Root = new RopePieceBTreeLeaf();
}
}
void RopePieceBTree::insert(unsigned Offset, const RopePiece &R) {
// #1. Split at Offset.
if (RopePieceBTreeNode *RHS = getRoot(Root)->split(Offset))
Root = new RopePieceBTreeInterior(getRoot(Root), RHS);
// #2. Do the insertion.
if (RopePieceBTreeNode *RHS = getRoot(Root)->insert(Offset, R))
Root = new RopePieceBTreeInterior(getRoot(Root), RHS);
}
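// For example: an insertion into the middle of an existing piece first splits
// that piece (step #1), so the insertion in step #2 always lands on a piece
// boundary; if either step overflows the root, a new interior root is created
// above the old one.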
void RopePieceBTree::erase(unsigned Offset, unsigned NumBytes) {
// #1. Split at Offset.
if (RopePieceBTreeNode *RHS = getRoot(Root)->split(Offset))
Root = new RopePieceBTreeInterior(getRoot(Root), RHS);
// #2. Do the erasing.
getRoot(Root)->erase(Offset, NumBytes);
}
//===----------------------------------------------------------------------===//
// RewriteRope Implementation
//===----------------------------------------------------------------------===//
/// MakeRopeString - This copies the specified byte range into some instance of
/// RopeRefCountString, and returns a RopePiece that represents it. This uses
/// the AllocBuffer object to aggregate requests for small strings into one
/// allocation instead of doing tons of tiny allocations.
RopePiece RewriteRope::MakeRopeString(const char *Start, const char *End) {
unsigned Len = End-Start;
assert(Len && "Zero length RopePiece is invalid!");
// If we have space for this string in the current alloc buffer, use it.
if (AllocOffs+Len <= AllocChunkSize) {
memcpy(AllocBuffer->Data+AllocOffs, Start, Len);
AllocOffs += Len;
return RopePiece(AllocBuffer, AllocOffs-Len, AllocOffs);
}
// If we don't have enough room because this specific allocation is huge,
// just allocate a new rope piece for it alone.
if (Len > AllocChunkSize) {
unsigned Size = End-Start+sizeof(RopeRefCountString)-1;
RopeRefCountString *Res =
reinterpret_cast<RopeRefCountString *>(new char[Size]);
Res->RefCount = 0;
memcpy(Res->Data, Start, End-Start);
return RopePiece(Res, 0, End-Start);
}
  // Otherwise, this was a small request but we just don't have space for it.
// Make a new chunk and share it with later allocations.
unsigned AllocSize = offsetof(RopeRefCountString, Data) + AllocChunkSize;
RopeRefCountString *Res =
reinterpret_cast<RopeRefCountString *>(new char[AllocSize]);
Res->RefCount = 0;
memcpy(Res->Data, Start, Len);
AllocBuffer = Res;
AllocOffs = Len;
return RopePiece(AllocBuffer, 0, Len);
}
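// For illustration (hypothetical sizes): two back-to-back MakeRopeString
// calls of length 10 share one RopeRefCountString, yielding RopePieces over
// [0,10) and [10,20) of its Data, while a single call with Len greater than
// AllocChunkSize gets its own exactly-sized allocation that is never shared.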
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Rewrite/HTMLRewrite.cpp | //== HTMLRewrite.cpp - Translate source code into prettified HTML --*- C++ -*-//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the html:: helper functions, which are used to translate
// the text of a source file into prettified HTML.
//
//===----------------------------------------------------------------------===//
#include "clang/Rewrite/Core/HTMLRewrite.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/TokenConcatenation.h"
#include "clang/Rewrite/Core/Rewriter.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
using namespace clang;
/// HighlightRange - Highlight a range in the source code with the specified
/// start/end tags. B/E must be in the same file. This ensures that
/// start/end tags are placed at the start/end of each line if the range is
/// multiline.
void html::HighlightRange(Rewriter &R, SourceLocation B, SourceLocation E,
const char *StartTag, const char *EndTag) {
SourceManager &SM = R.getSourceMgr();
B = SM.getExpansionLoc(B);
E = SM.getExpansionLoc(E);
FileID FID = SM.getFileID(B);
assert(SM.getFileID(E) == FID && "B/E not in the same file!");
unsigned BOffset = SM.getFileOffset(B);
unsigned EOffset = SM.getFileOffset(E);
// Include the whole end token in the range.
EOffset += Lexer::MeasureTokenLength(E, R.getSourceMgr(), R.getLangOpts());
bool Invalid = false;
const char *BufferStart = SM.getBufferData(FID, &Invalid).data();
if (Invalid)
return;
HighlightRange(R.getEditBuffer(FID), BOffset, EOffset,
BufferStart, StartTag, EndTag);
}
/// HighlightRange - This is the same as the above method, but takes
/// decomposed file locations.
void html::HighlightRange(RewriteBuffer &RB, unsigned B, unsigned E,
const char *BufferStart,
const char *StartTag, const char *EndTag) {
// Insert the tag at the absolute start/end of the range.
RB.InsertTextAfter(B, StartTag);
RB.InsertTextBefore(E, EndTag);
// Scan the range to see if there is a \r or \n. If so, and if the line is
// not blank, insert tags on that line as well.
bool HadOpenTag = true;
unsigned LastNonWhiteSpace = B;
for (unsigned i = B; i != E; ++i) {
switch (BufferStart[i]) {
case '\r':
case '\n':
// Okay, we found a newline in the range. If we have an open tag, we need
// to insert a close tag at the first non-whitespace before the newline.
if (HadOpenTag)
RB.InsertTextBefore(LastNonWhiteSpace+1, EndTag);
// Instead of inserting an open tag immediately after the newline, we
// wait until we see a non-whitespace character. This prevents us from
// inserting tags around blank lines, and also allows the open tag to
// be put *after* whitespace on a non-blank line.
HadOpenTag = false;
break;
case '\0':
case ' ':
case '\t':
case '\f':
case '\v':
// Ignore whitespace.
break;
default:
// If there is no tag open, do it now.
if (!HadOpenTag) {
RB.InsertTextAfter(i, StartTag);
HadOpenTag = true;
}
// Remember this character.
LastNonWhiteSpace = i;
break;
}
}
}
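// For example (illustrative): highlighting the two-line range "foo\n  bar"
// with StartTag "<b>" and EndTag "</b>" yields "<b>foo</b>\n  <b>bar</b>";
// the close tag lands after the last non-whitespace of each line, and the
// open tag is re-inserted after the leading whitespace of the next non-blank
// line.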
void html::EscapeText(Rewriter &R, FileID FID,
bool EscapeSpaces, bool ReplaceTabs) {
const llvm::MemoryBuffer *Buf = R.getSourceMgr().getBuffer(FID);
const char* C = Buf->getBufferStart();
const char* FileEnd = Buf->getBufferEnd();
assert (C <= FileEnd);
RewriteBuffer &RB = R.getEditBuffer(FID);
unsigned ColNo = 0;
for (unsigned FilePos = 0; C != FileEnd ; ++C, ++FilePos) {
switch (*C) {
default: ++ColNo; break;
case '\n':
case '\r':
ColNo = 0;
break;
case ' ':
if (EscapeSpaces)
        RB.ReplaceText(FilePos, 1, "&nbsp;");
++ColNo;
break;
case '\f':
RB.ReplaceText(FilePos, 1, "<hr>");
ColNo = 0;
break;
case '\t': {
if (!ReplaceTabs)
break;
unsigned NumSpaces = 8-(ColNo&7);
if (EscapeSpaces)
        RB.ReplaceText(FilePos, 1,
                       StringRef("&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"
                                 "&nbsp;&nbsp;", 6*NumSpaces));
else
RB.ReplaceText(FilePos, 1, StringRef(" ", NumSpaces));
ColNo += NumSpaces;
break;
}
    case '<':
      RB.ReplaceText(FilePos, 1, "&lt;");
      ++ColNo;
      break;
    case '>':
      RB.ReplaceText(FilePos, 1, "&gt;");
      ++ColNo;
      break;
    case '&':
      RB.ReplaceText(FilePos, 1, "&amp;");
      ++ColNo;
      break;
}
}
}
std::string html::EscapeText(StringRef s, bool EscapeSpaces, bool ReplaceTabs) {
unsigned len = s.size();
std::string Str;
llvm::raw_string_ostream os(Str);
for (unsigned i = 0 ; i < len; ++i) {
char c = s[i];
switch (c) {
default:
os << c; break;
case ' ':
      if (EscapeSpaces) os << "&nbsp;";
else os << ' ';
break;
case '\t':
if (ReplaceTabs) {
if (EscapeSpaces)
for (unsigned i = 0; i < 4; ++i)
            os << "&nbsp;";
else
for (unsigned i = 0; i < 4; ++i)
os << " ";
}
else
os << c;
break;
    case '<': os << "&lt;"; break;
    case '>': os << "&gt;"; break;
    case '&': os << "&amp;"; break;
}
}
return os.str();
}
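// Example (illustrative): EscapeText("a<b && c>d", /*EscapeSpaces=*/false)
// returns "a&lt;b &amp;&amp; c&gt;d"; with EscapeSpaces=true each space is
// emitted as "&nbsp;" instead.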
static void AddLineNumber(RewriteBuffer &RB, unsigned LineNo,
unsigned B, unsigned E) {
SmallString<256> Str;
llvm::raw_svector_ostream OS(Str);
OS << "<tr><td class=\"num\" id=\"LN"
<< LineNo << "\">"
<< LineNo << "</td><td class=\"line\">";
if (B == E) { // Handle empty lines.
    OS << " &nbsp;</td></tr>";
RB.InsertTextBefore(B, OS.str());
} else {
RB.InsertTextBefore(B, OS.str());
RB.InsertTextBefore(E, "</td></tr>");
}
}
void html::AddLineNumbers(Rewriter& R, FileID FID) {
const llvm::MemoryBuffer *Buf = R.getSourceMgr().getBuffer(FID);
const char* FileBeg = Buf->getBufferStart();
const char* FileEnd = Buf->getBufferEnd();
const char* C = FileBeg;
RewriteBuffer &RB = R.getEditBuffer(FID);
assert (C <= FileEnd);
unsigned LineNo = 0;
unsigned FilePos = 0;
while (C != FileEnd) {
++LineNo;
unsigned LineStartPos = FilePos;
unsigned LineEndPos = FileEnd - FileBeg;
assert (FilePos <= LineEndPos);
assert (C < FileEnd);
// Scan until the newline (or end-of-file).
while (C != FileEnd) {
char c = *C;
++C;
if (c == '\n') {
LineEndPos = FilePos++;
break;
}
++FilePos;
}
AddLineNumber(RB, LineNo, LineStartPos, LineEndPos);
}
// Add one big table tag that surrounds all of the code.
RB.InsertTextBefore(0, "<table class=\"code\">\n");
RB.InsertTextAfter(FileEnd - FileBeg, "</table>");
}
void html::AddHeaderFooterInternalBuiltinCSS(Rewriter& R, FileID FID,
const char *title) {
const llvm::MemoryBuffer *Buf = R.getSourceMgr().getBuffer(FID);
const char* FileStart = Buf->getBufferStart();
const char* FileEnd = Buf->getBufferEnd();
SourceLocation StartLoc = R.getSourceMgr().getLocForStartOfFile(FID);
SourceLocation EndLoc = StartLoc.getLocWithOffset(FileEnd-FileStart);
std::string s;
llvm::raw_string_ostream os(s);
os << "<!doctype html>\n" // Use HTML 5 doctype
"<html>\n<head>\n";
if (title)
os << "<title>" << html::EscapeText(title) << "</title>\n";
os << "<style type=\"text/css\">\n"
" body { color:#000000; background-color:#ffffff }\n"
" body { font-family:Helvetica, sans-serif; font-size:10pt }\n"
" h1 { font-size:14pt }\n"
" .code { border-collapse:collapse; width:100%; }\n"
" .code { font-family: \"Monospace\", monospace; font-size:10pt }\n"
" .code { line-height: 1.2em }\n"
" .comment { color: green; font-style: oblique }\n"
" .keyword { color: blue }\n"
" .string_literal { color: red }\n"
" .directive { color: darkmagenta }\n"
// Macro expansions.
" .expansion { display: none; }\n"
" .macro:hover .expansion { display: block; border: 2px solid #FF0000; "
"padding: 2px; background-color:#FFF0F0; font-weight: normal; "
" -webkit-border-radius:5px; -webkit-box-shadow:1px 1px 7px #000; "
"position: absolute; top: -1em; left:10em; z-index: 1 } \n"
" .macro { color: darkmagenta; background-color:LemonChiffon;"
// Macros are position: relative to provide base for expansions.
" position: relative }\n"
" .num { width:2.5em; padding-right:2ex; background-color:#eeeeee }\n"
" .num { text-align:right; font-size:8pt }\n"
" .num { color:#444444 }\n"
" .line { padding-left: 1ex; border-left: 3px solid #ccc }\n"
" .line { white-space: pre }\n"
" .msg { -webkit-box-shadow:1px 1px 7px #000 }\n"
" .msg { -webkit-border-radius:5px }\n"
" .msg { font-family:Helvetica, sans-serif; font-size:8pt }\n"
" .msg { float:left }\n"
" .msg { padding:0.25em 1ex 0.25em 1ex }\n"
" .msg { margin-top:10px; margin-bottom:10px }\n"
" .msg { font-weight:bold }\n"
" .msg { max-width:60em; word-wrap: break-word; white-space: pre-wrap }\n"
" .msgT { padding:0x; spacing:0x }\n"
" .msgEvent { background-color:#fff8b4; color:#000000 }\n"
" .msgControl { background-color:#bbbbbb; color:#000000 }\n"
" .mrange { background-color:#dfddf3 }\n"
" .mrange { border-bottom:1px solid #6F9DBE }\n"
" .PathIndex { font-weight: bold; padding:0px 5px; "
"margin-right:5px; }\n"
" .PathIndex { -webkit-border-radius:8px }\n"
" .PathIndexEvent { background-color:#bfba87 }\n"
" .PathIndexControl { background-color:#8c8c8c }\n"
" .PathNav a { text-decoration:none; font-size: larger }\n"
" .CodeInsertionHint { font-weight: bold; background-color: #10dd10 }\n"
" .CodeRemovalHint { background-color:#de1010 }\n"
" .CodeRemovalHint { border-bottom:1px solid #6F9DBE }\n"
" table.simpletable {\n"
" padding: 5px;\n"
" font-size:12pt;\n"
" margin:20px;\n"
" border-collapse: collapse; border-spacing: 0px;\n"
" }\n"
" td.rowname {\n"
" text-align:right; font-weight:bold; color:#444444;\n"
" padding-right:2ex; }\n"
"</style>\n</head>\n<body>";
// Generate header
R.InsertTextBefore(StartLoc, os.str());
// Generate footer
R.InsertTextAfter(EndLoc, "</body></html>\n");
}
/// SyntaxHighlight - Relex the specified FileID and annotate the HTML with
/// information about keywords, macro expansions etc. This uses the macro
/// table state from the end of the file, so it won't be perfectly accurate,
/// but it will be reasonably close.
void html::SyntaxHighlight(Rewriter &R, FileID FID, const Preprocessor &PP) {
RewriteBuffer &RB = R.getEditBuffer(FID);
const SourceManager &SM = PP.getSourceManager();
const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
Lexer L(FID, FromFile, SM, PP.getLangOpts());
const char *BufferStart = L.getBuffer().data();
// Inform the preprocessor that we want to retain comments as tokens, so we
// can highlight them.
L.SetCommentRetentionState(true);
// Lex all the tokens in raw mode, to avoid entering #includes or expanding
// macros.
Token Tok;
L.LexFromRawLexer(Tok);
while (Tok.isNot(tok::eof)) {
// Since we are lexing unexpanded tokens, all tokens are from the main
// FileID.
unsigned TokOffs = SM.getFileOffset(Tok.getLocation());
unsigned TokLen = Tok.getLength();
switch (Tok.getKind()) {
default: break;
case tok::identifier:
llvm_unreachable("tok::identifier in raw lexing mode!");
case tok::raw_identifier: {
// Fill in Result.IdentifierInfo and update the token kind,
// looking up the identifier in the identifier table.
PP.LookUpIdentifierInfo(Tok);
      // If the identifier resolved to a keyword (its kind is no longer
      // tok::identifier), highlight it as such.
if (Tok.isNot(tok::identifier))
HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart,
"<span class='keyword'>", "</span>");
break;
}
case tok::comment:
HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart,
"<span class='comment'>", "</span>");
break;
case tok::utf8_string_literal:
// Chop off the u part of u8 prefix
++TokOffs;
--TokLen;
// FALL THROUGH to chop the 8
LLVM_FALLTHROUGH; // HLSL Change
case tok::wide_string_literal:
case tok::utf16_string_literal:
case tok::utf32_string_literal:
// Chop off the L, u, U or 8 prefix
++TokOffs;
--TokLen;
LLVM_FALLTHROUGH; // HLSL Change
case tok::string_literal:
// FIXME: Exclude the optional ud-suffix from the highlighted range.
HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart,
"<span class='string_literal'>", "</span>");
break;
case tok::hash: {
// If this is a preprocessor directive, all tokens to end of line are too.
if (!Tok.isAtStartOfLine())
break;
// Eat all of the tokens until we get to the next one at the start of
// line.
unsigned TokEnd = TokOffs+TokLen;
L.LexFromRawLexer(Tok);
while (!Tok.isAtStartOfLine() && Tok.isNot(tok::eof)) {
TokEnd = SM.getFileOffset(Tok.getLocation())+Tok.getLength();
L.LexFromRawLexer(Tok);
}
// Find end of line. This is a hack.
HighlightRange(RB, TokOffs, TokEnd, BufferStart,
"<span class='directive'>", "</span>");
// Don't skip the next token.
continue;
}
}
L.LexFromRawLexer(Tok);
}
}
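// For example (illustrative): the line "int x; // note" is rewritten as
//   <span class='keyword'>int</span> x; <span class='comment'>// note</span>
// using the CSS classes emitted by AddHeaderFooterInternalBuiltinCSS above.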
/// HighlightMacros - This uses the macro table state from the end of the
/// file, to re-expand macros and insert (into the HTML) information about the
/// macro expansions. This won't be perfectly accurate, but it will be
/// reasonably close.
void html::HighlightMacros(Rewriter &R, FileID FID, const Preprocessor& PP) {
// Re-lex the raw token stream into a token buffer.
const SourceManager &SM = PP.getSourceManager();
std::vector<Token> TokenStream;
const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
Lexer L(FID, FromFile, SM, PP.getLangOpts());
// Lex all the tokens in raw mode, to avoid entering #includes or expanding
// macros.
while (1) {
Token Tok;
L.LexFromRawLexer(Tok);
// If this is a # at the start of a line, discard it from the token stream.
// We don't want the re-preprocess step to see #defines, #includes or other
// preprocessor directives.
if (Tok.is(tok::hash) && Tok.isAtStartOfLine())
continue;
// If this is a ## token, change its kind to unknown so that repreprocessing
// it will not produce an error.
if (Tok.is(tok::hashhash))
Tok.setKind(tok::unknown);
// If this raw token is an identifier, the raw lexer won't have looked up
// the corresponding identifier info for it. Do this now so that it will be
// macro expanded when we re-preprocess it.
if (Tok.is(tok::raw_identifier))
PP.LookUpIdentifierInfo(Tok);
TokenStream.push_back(Tok);
if (Tok.is(tok::eof)) break;
}
// Temporarily change the diagnostics object so that we ignore any generated
// diagnostics from this pass.
DiagnosticsEngine TmpDiags(PP.getDiagnostics().getDiagnosticIDs(),
&PP.getDiagnostics().getDiagnosticOptions(),
new IgnoringDiagConsumer);
// FIXME: This is a huge hack; we reuse the input preprocessor because we want
// its state, but we aren't actually changing it (we hope). This should really
// construct a copy of the preprocessor.
Preprocessor &TmpPP = const_cast<Preprocessor&>(PP);
DiagnosticsEngine *OldDiags = &TmpPP.getDiagnostics();
TmpPP.setDiagnostics(TmpDiags);
// Inform the preprocessor that we don't want comments.
TmpPP.SetCommentRetentionState(false, false);
// We don't want pragmas either. Although we filtered out #pragma, removing
// _Pragma and __pragma is much harder.
bool PragmasPreviouslyEnabled = TmpPP.getPragmasEnabled();
TmpPP.setPragmasEnabled(false);
// Enter the tokens we just lexed. This will cause them to be macro expanded
// but won't enter sub-files (because we removed #'s).
TmpPP.EnterTokenStream(&TokenStream[0], TokenStream.size(), false, false);
TokenConcatenation ConcatInfo(TmpPP);
// Lex all the tokens.
Token Tok;
TmpPP.Lex(Tok);
while (Tok.isNot(tok::eof)) {
// Ignore non-macro tokens.
if (!Tok.getLocation().isMacroID()) {
TmpPP.Lex(Tok);
continue;
}
// Okay, we have the first token of a macro expansion: highlight the
// expansion by inserting a start tag before the macro expansion and
// end tag after it.
std::pair<SourceLocation, SourceLocation> LLoc =
SM.getExpansionRange(Tok.getLocation());
// Ignore tokens whose instantiation location was not the main file.
if (SM.getFileID(LLoc.first) != FID) {
TmpPP.Lex(Tok);
continue;
}
assert(SM.getFileID(LLoc.second) == FID &&
"Start and end of expansion must be in the same ultimate file!");
std::string Expansion = EscapeText(TmpPP.getSpelling(Tok));
unsigned LineLen = Expansion.size();
Token PrevPrevTok;
Token PrevTok = Tok;
// Okay, eat this token, getting the next one.
TmpPP.Lex(Tok);
// Skip all the rest of the tokens that are part of this macro
// instantiation. It would be really nice to pop up a window with all the
// spelling of the tokens or something.
while (!Tok.is(tok::eof) &&
SM.getExpansionLoc(Tok.getLocation()) == LLoc.first) {
// Insert a newline if the macro expansion is getting large.
if (LineLen > 60) {
Expansion += "<br>";
LineLen = 0;
}
LineLen -= Expansion.size();
// If the tokens were already space separated, or if they must be to avoid
// them being implicitly pasted, add a space between them.
if (Tok.hasLeadingSpace() ||
ConcatInfo.AvoidConcat(PrevPrevTok, PrevTok, Tok))
Expansion += ' ';
// Escape any special characters in the token text.
Expansion += EscapeText(TmpPP.getSpelling(Tok));
LineLen += Expansion.size();
PrevPrevTok = PrevTok;
PrevTok = Tok;
TmpPP.Lex(Tok);
}
// Insert the expansion as the end tag, so that multi-line macros all get
// highlighted.
Expansion = "<span class='expansion'>" + Expansion + "</span></span>";
HighlightRange(R, LLoc.first, LLoc.second,
"<span class='macro'>", Expansion.c_str());
}
// Restore the preprocessor's old state.
TmpPP.setDiagnostics(*OldDiags);
TmpPP.setPragmasEnabled(PragmasPreviouslyEnabled);
}
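// For example (illustrative): given "#define TWO 2" and a use of TWO in the
// main file, the use is rewritten as
//   <span class='macro'>TWO<span class='expansion'>2</span></span>
// so that, with the CSS emitted above, hovering over the macro name reveals
// its expansion.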
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Rewrite/Rewriter.cpp | //===--- Rewriter.cpp - Code rewriting interface --------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Rewriter class, which is used for code
// transformations.
//
//===----------------------------------------------------------------------===//
#include "clang/Rewrite/Core/Rewriter.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticIDs.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
raw_ostream &RewriteBuffer::write(raw_ostream &os) const {
// Walk RewriteRope chunks efficiently using MoveToNextPiece() instead of the
// character iterator.
for (RopePieceBTreeIterator I = begin(), E = end(); I != E;
I.MoveToNextPiece())
os << I.piece();
return os;
}
/// \brief Return true if this character is non-new-line whitespace:
/// ' ', '\\t', '\\f', '\\v', '\\r'.
static inline bool isWhitespace(unsigned char c) {
switch (c) {
case ' ':
case '\t':
case '\f':
case '\v':
case '\r':
return true;
default:
return false;
}
}
void RewriteBuffer::RemoveText(unsigned OrigOffset, unsigned Size,
bool removeLineIfEmpty) {
// Nothing to remove, exit early.
if (Size == 0) return;
unsigned RealOffset = getMappedOffset(OrigOffset, true);
assert(RealOffset+Size <= Buffer.size() && "Invalid location");
// Remove the dead characters.
Buffer.erase(RealOffset, Size);
// Add a delta so that future changes are offset correctly.
AddReplaceDelta(OrigOffset, -Size);
if (removeLineIfEmpty) {
    // Find the line on which the removal occurred, and if the line is now
    // completely empty, remove it as well.
iterator curLineStart = begin();
unsigned curLineStartOffs = 0;
iterator posI = begin();
for (unsigned i = 0; i != RealOffset; ++i) {
if (*posI == '\n') {
curLineStart = posI;
++curLineStart;
curLineStartOffs = i + 1;
}
++posI;
}
unsigned lineSize = 0;
posI = curLineStart;
while (posI != end() && isWhitespace(*posI)) {
++posI;
++lineSize;
}
if (posI != end() && *posI == '\n') {
Buffer.erase(curLineStartOffs, lineSize + 1/* + '\n'*/);
AddReplaceDelta(curLineStartOffs, -(lineSize + 1/* + '\n'*/));
}
}
}
void RewriteBuffer::InsertText(unsigned OrigOffset, StringRef Str,
bool InsertAfter) {
// Nothing to insert, exit early.
if (Str.empty()) return;
unsigned RealOffset = getMappedOffset(OrigOffset, InsertAfter);
Buffer.insert(RealOffset, Str.begin(), Str.end());
// Add a delta so that future changes are offset correctly.
AddInsertDelta(OrigOffset, Str.size());
}
/// ReplaceText - This method replaces a range of characters in the input
/// buffer with a new string. This is effectively a combined "remove+insert"
/// operation.
void RewriteBuffer::ReplaceText(unsigned OrigOffset, unsigned OrigLength,
StringRef NewStr) {
unsigned RealOffset = getMappedOffset(OrigOffset, true);
Buffer.erase(RealOffset, OrigLength);
Buffer.insert(RealOffset, NewStr.begin(), NewStr.end());
if (OrigLength != NewStr.size())
AddReplaceDelta(OrigOffset, NewStr.size() - OrigLength);
}
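// Example: a minimal sketch of the offset-mapping machinery above. All
// offsets passed to these methods are in terms of the *original* buffer;
// the recorded deltas remap them into the current buffer. (In-tree,
// RewriteBuffers are normally obtained via Rewriter::getEditBuffer, which
// calls Initialize itself, rather than constructed directly.)
//
// \code
//   RewriteBuffer RB;
//   const char Text[] = "hello world";
//   RB.Initialize(Text, Text + 11);
//   RB.InsertText(5, ", brave", /*InsertAfter=*/true);  // "hello, brave world"
//   RB.RemoveText(0, 5);             // original offsets: drops "hello"
//   RB.ReplaceText(6, 5, "planet");  // replaces the original "world"
//   // RB.write(llvm::outs()) now prints ", brave planet".
// \endcode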
//===----------------------------------------------------------------------===//
// Rewriter class
//===----------------------------------------------------------------------===//
/// getRangeSize - Return the size in bytes of the specified range if they
/// are in the same file. If not, this returns -1.
int Rewriter::getRangeSize(const CharSourceRange &Range,
RewriteOptions opts) const {
if (!isRewritable(Range.getBegin()) ||
!isRewritable(Range.getEnd())) return -1;
FileID StartFileID, EndFileID;
unsigned StartOff, EndOff;
StartOff = getLocationOffsetAndFileID(Range.getBegin(), StartFileID);
EndOff = getLocationOffsetAndFileID(Range.getEnd(), EndFileID);
if (StartFileID != EndFileID)
return -1;
  // If edits have been made to this buffer, the offsets within the range may
  // have shifted.
std::map<FileID, RewriteBuffer>::const_iterator I =
RewriteBuffers.find(StartFileID);
if (I != RewriteBuffers.end()) {
const RewriteBuffer &RB = I->second;
EndOff = RB.getMappedOffset(EndOff, opts.IncludeInsertsAtEndOfRange);
StartOff = RB.getMappedOffset(StartOff, !opts.IncludeInsertsAtBeginOfRange);
}
// Adjust the end offset to the end of the last token, instead of being the
// start of the last token if this is a token range.
if (Range.isTokenRange())
EndOff += Lexer::MeasureTokenLength(Range.getEnd(), *SourceMgr, *LangOpts);
return EndOff-StartOff;
}
int Rewriter::getRangeSize(SourceRange Range, RewriteOptions opts) const {
return getRangeSize(CharSourceRange::getTokenRange(Range), opts);
}
/// getRewrittenText - Return the rewritten form of the text in the specified
/// range. If the start or end of the range was unrewritable or if they are
/// in different buffers, this returns an empty string.
///
/// Note that this method is not particularly efficient.
///
std::string Rewriter::getRewrittenText(SourceRange Range) const {
if (!isRewritable(Range.getBegin()) ||
!isRewritable(Range.getEnd()))
return "";
FileID StartFileID, EndFileID;
unsigned StartOff, EndOff;
StartOff = getLocationOffsetAndFileID(Range.getBegin(), StartFileID);
EndOff = getLocationOffsetAndFileID(Range.getEnd(), EndFileID);
if (StartFileID != EndFileID)
return ""; // Start and end in different buffers.
  // If edits have been made to this buffer, the offsets within the range may
  // have shifted.
std::map<FileID, RewriteBuffer>::const_iterator I =
RewriteBuffers.find(StartFileID);
if (I == RewriteBuffers.end()) {
// If the buffer hasn't been rewritten, just return the text from the input.
const char *Ptr = SourceMgr->getCharacterData(Range.getBegin());
// Adjust the end offset to the end of the last token, instead of being the
// start of the last token.
EndOff += Lexer::MeasureTokenLength(Range.getEnd(), *SourceMgr, *LangOpts);
return std::string(Ptr, Ptr+EndOff-StartOff);
}
const RewriteBuffer &RB = I->second;
EndOff = RB.getMappedOffset(EndOff, true);
StartOff = RB.getMappedOffset(StartOff);
// Adjust the end offset to the end of the last token, instead of being the
// start of the last token.
EndOff += Lexer::MeasureTokenLength(Range.getEnd(), *SourceMgr, *LangOpts);
// Advance the iterators to the right spot, yay for linear time algorithms.
RewriteBuffer::iterator Start = RB.begin();
std::advance(Start, StartOff);
RewriteBuffer::iterator End = Start;
std::advance(End, EndOff-StartOff);
return std::string(Start, End);
}
unsigned Rewriter::getLocationOffsetAndFileID(SourceLocation Loc,
FileID &FID) const {
assert(Loc.isValid() && "Invalid location");
std::pair<FileID,unsigned> V = SourceMgr->getDecomposedLoc(Loc);
FID = V.first;
return V.second;
}
/// getEditBuffer - Get or create a RewriteBuffer for the specified FileID.
///
RewriteBuffer &Rewriter::getEditBuffer(FileID FID) {
std::map<FileID, RewriteBuffer>::iterator I =
RewriteBuffers.lower_bound(FID);
if (I != RewriteBuffers.end() && I->first == FID)
return I->second;
I = RewriteBuffers.insert(I, std::make_pair(FID, RewriteBuffer()));
StringRef MB = SourceMgr->getBufferData(FID);
I->second.Initialize(MB.begin(), MB.end());
return I->second;
}
/// InsertText - Insert the specified string at the specified location in the
/// original buffer.
bool Rewriter::InsertText(SourceLocation Loc, StringRef Str,
bool InsertAfter, bool indentNewLines) {
if (!isRewritable(Loc)) return true;
FileID FID;
unsigned StartOffs = getLocationOffsetAndFileID(Loc, FID);
SmallString<128> indentedStr;
if (indentNewLines && Str.find('\n') != StringRef::npos) {
StringRef MB = SourceMgr->getBufferData(FID);
unsigned lineNo = SourceMgr->getLineNumber(FID, StartOffs) - 1;
const SrcMgr::ContentCache *
Content = SourceMgr->getSLocEntry(FID).getFile().getContentCache();
unsigned lineOffs = Content->SourceLineCache[lineNo];
// Find the whitespace at the start of the line.
StringRef indentSpace;
{
unsigned i = lineOffs;
while (isWhitespace(MB[i]))
++i;
indentSpace = MB.substr(lineOffs, i-lineOffs);
}
SmallVector<StringRef, 4> lines;
Str.split(lines, "\n");
for (unsigned i = 0, e = lines.size(); i != e; ++i) {
indentedStr += lines[i];
if (i < e-1) {
indentedStr += '\n';
indentedStr += indentSpace;
}
}
Str = indentedStr.str();
}
getEditBuffer(FID).InsertText(StartOffs, Str, InsertAfter);
return false;
}
bool Rewriter::InsertTextAfterToken(SourceLocation Loc, StringRef Str) {
if (!isRewritable(Loc)) return true;
FileID FID;
unsigned StartOffs = getLocationOffsetAndFileID(Loc, FID);
RewriteOptions rangeOpts;
rangeOpts.IncludeInsertsAtBeginOfRange = false;
StartOffs += getRangeSize(SourceRange(Loc, Loc), rangeOpts);
getEditBuffer(FID).InsertText(StartOffs, Str, /*InsertAfter*/true);
return false;
}
/// RemoveText - Remove the specified text region.
bool Rewriter::RemoveText(SourceLocation Start, unsigned Length,
RewriteOptions opts) {
if (!isRewritable(Start)) return true;
FileID FID;
unsigned StartOffs = getLocationOffsetAndFileID(Start, FID);
getEditBuffer(FID).RemoveText(StartOffs, Length, opts.RemoveLineIfEmpty);
return false;
}
/// ReplaceText - This method replaces a range of characters in the input
/// buffer with a new string. This is effectively a combined "remove/insert"
/// operation.
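///
/// For example (hypothetical location `Start`):
///
///   // Replace the three characters starting at Start with "bar".
///   Rewrite.ReplaceText(Start, /*OrigLength=*/3, "bar");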
bool Rewriter::ReplaceText(SourceLocation Start, unsigned OrigLength,
StringRef NewStr) {
if (!isRewritable(Start)) return true;
FileID StartFileID;
unsigned StartOffs = getLocationOffsetAndFileID(Start, StartFileID);
getEditBuffer(StartFileID).ReplaceText(StartOffs, OrigLength, NewStr);
return false;
}
bool Rewriter::ReplaceText(SourceRange range, SourceRange replacementRange) {
if (!isRewritable(range.getBegin())) return true;
if (!isRewritable(range.getEnd())) return true;
if (replacementRange.isInvalid()) return true;
SourceLocation start = range.getBegin();
unsigned origLength = getRangeSize(range);
unsigned newLength = getRangeSize(replacementRange);
FileID FID;
unsigned newOffs = getLocationOffsetAndFileID(replacementRange.getBegin(),
FID);
StringRef MB = SourceMgr->getBufferData(FID);
return ReplaceText(start, origLength, MB.substr(newOffs, newLength));
}
bool Rewriter::IncreaseIndentation(CharSourceRange range,
SourceLocation parentIndent) {
if (range.isInvalid()) return true;
if (!isRewritable(range.getBegin())) return true;
if (!isRewritable(range.getEnd())) return true;
if (!isRewritable(parentIndent)) return true;
FileID StartFileID, EndFileID, parentFileID;
unsigned StartOff, EndOff, parentOff;
StartOff = getLocationOffsetAndFileID(range.getBegin(), StartFileID);
EndOff = getLocationOffsetAndFileID(range.getEnd(), EndFileID);
parentOff = getLocationOffsetAndFileID(parentIndent, parentFileID);
if (StartFileID != EndFileID || StartFileID != parentFileID)
return true;
if (StartOff > EndOff)
return true;
FileID FID = StartFileID;
StringRef MB = SourceMgr->getBufferData(FID);
unsigned parentLineNo = SourceMgr->getLineNumber(FID, parentOff) - 1;
unsigned startLineNo = SourceMgr->getLineNumber(FID, StartOff) - 1;
unsigned endLineNo = SourceMgr->getLineNumber(FID, EndOff) - 1;
const SrcMgr::ContentCache *
Content = SourceMgr->getSLocEntry(FID).getFile().getContentCache();
// Find where the lines start.
unsigned parentLineOffs = Content->SourceLineCache[parentLineNo];
unsigned startLineOffs = Content->SourceLineCache[startLineNo];
// Find the whitespace at the start of each line.
StringRef parentSpace, startSpace;
{
unsigned i = parentLineOffs;
while (isWhitespace(MB[i]))
++i;
parentSpace = MB.substr(parentLineOffs, i-parentLineOffs);
i = startLineOffs;
while (isWhitespace(MB[i]))
++i;
startSpace = MB.substr(startLineOffs, i-startLineOffs);
}
if (parentSpace.size() >= startSpace.size())
return true;
if (!startSpace.startswith(parentSpace))
return true;
StringRef indent = startSpace.substr(parentSpace.size());
// Indent the lines between start/end offsets.
RewriteBuffer &RB = getEditBuffer(FID);
for (unsigned lineNo = startLineNo; lineNo <= endLineNo; ++lineNo) {
unsigned offs = Content->SourceLineCache[lineNo];
unsigned i = offs;
while (isWhitespace(MB[i]))
++i;
StringRef origIndent = MB.substr(offs, i-offs);
if (origIndent.startswith(startSpace))
RB.InsertText(offs, indent, /*InsertAfter=*/false);
}
return false;
}
namespace {
// A wrapper for a file stream that atomically overwrites the target.
//
// Creates a file output stream for a temporary file in the constructor,
// which is later accessible via getStream() if ok() returns true.
// Flushes the stream and moves the temporary file to the target location
// in the destructor.
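//
// A minimal usage sketch (hypothetical diagnostics engine `Diags` and file
// name):
//
//   bool AllWritten = true;
//   {
//     AtomicallyMovedFile File(Diags, "output.txt", AllWritten);
//     if (File.ok())
//       File.getStream() << "new contents";
//   } // Destructor flushes and renames the temporary over "output.txt".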
class AtomicallyMovedFile {
public:
AtomicallyMovedFile(DiagnosticsEngine &Diagnostics, StringRef Filename,
bool &AllWritten)
: Diagnostics(Diagnostics), Filename(Filename), AllWritten(AllWritten) {
TempFilename = Filename;
TempFilename += "-%%%%%%%%";
int FD;
if (llvm::sys::fs::createUniqueFile(TempFilename.str(), FD, TempFilename)) {
AllWritten = false;
Diagnostics.Report(clang::diag::err_unable_to_make_temp)
<< TempFilename;
} else {
FileStream.reset(new llvm::raw_fd_ostream(FD, /*shouldClose=*/true));
}
}
~AtomicallyMovedFile() {
if (!ok()) return;
FileStream->flush();
#ifdef LLVM_ON_WIN32
// Win32 does not allow rename/removing opened files.
FileStream.reset();
#endif
if (std::error_code ec =
llvm::sys::fs::rename(TempFilename.str(), Filename)) {
AllWritten = false;
Diagnostics.Report(clang::diag::err_unable_to_rename_temp)
<< TempFilename << Filename << ec.message();
// If the remove fails, there's not a lot we can do - this is already an
// error.
llvm::sys::fs::remove(TempFilename.str());
}
}
bool ok() { return (bool)FileStream; }
raw_ostream &getStream() { return *FileStream; }
private:
DiagnosticsEngine &Diagnostics;
StringRef Filename;
SmallString<128> TempFilename;
std::unique_ptr<llvm::raw_fd_ostream> FileStream;
bool &AllWritten;
};
} // end anonymous namespace
bool Rewriter::overwriteChangedFiles() {
bool AllWritten = true;
for (buffer_iterator I = buffer_begin(), E = buffer_end(); I != E; ++I) {
const FileEntry *Entry =
getSourceMgr().getFileEntryForID(I->first);
AtomicallyMovedFile File(getSourceMgr().getDiagnostics(), Entry->getName(),
AllWritten);
if (File.ok()) {
I->second.write(File.getStream());
}
}
return !AllWritten;
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/FrontendTool/CMakeLists.txt | set(LLVM_LINK_COMPONENTS
Option
Support
)
set(link_libs
clangBasic
clangCodeGen
clangDriver
clangFrontend
clangRewriteFrontend
)
if(CLANG_ENABLE_ARCMT)
list(APPEND link_libs
clangARCMigrate
)
endif()
if(CLANG_ENABLE_STATIC_ANALYZER)
list(APPEND link_libs
clangStaticAnalyzerFrontend
)
endif()
add_clang_library(clangFrontendTool
ExecuteCompilerInvocation.cpp
DEPENDS
ClangDriverOptions
LINK_LIBS
${link_libs}
)
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp | //===--- ExecuteCompilerInvocation.cpp ------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file holds ExecuteCompilerInvocation(). It is split into its own file to
// minimize the impact of pulling in essentially everything else in Clang.
//
//===----------------------------------------------------------------------===//
#include "clang/FrontendTool/Utils.h"
// #include "clang/ARCMigrate/ARCMTActions.h" // HLSL Change
#include "clang/CodeGen/CodeGenAction.h"
#include "clang/Driver/Options.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/FrontendActions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/FrontendPluginRegistry.h"
#include "clang/Frontend/Utils.h"
#include "clang/Rewrite/Frontend/FrontendActions.h"
#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
#include "llvm/Option/OptTable.h"
#include "llvm/Option/Option.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/ErrorHandling.h"
using namespace clang;
using namespace llvm::opt;
static FrontendAction *CreateFrontendBaseAction(CompilerInstance &CI) {
using namespace clang::frontend;
StringRef Action("unknown");
(void)Action;
switch (CI.getFrontendOpts().ProgramAction) {
case ASTDeclList: return new ASTDeclListAction();
case ASTDump: return new ASTDumpAction();
case ASTPrint: return new ASTPrintAction();
case ASTView: return new ASTViewAction();
case DumpRawTokens: return new DumpRawTokensAction();
case DumpTokens: return new DumpTokensAction();
case EmitAssembly: return new EmitAssemblyAction();
case EmitBC: return new EmitBCAction();
case EmitHTML: return new HTMLPrintAction();
case EmitLLVM: return new EmitLLVMAction();
case EmitLLVMOnly: return new EmitLLVMOnlyAction();
case EmitCodeGenOnly: return new EmitCodeGenOnlyAction();
case EmitObj: return new EmitObjAction();
case FixIt: return new FixItAction();
//case GenerateModule: return new GenerateModuleAction; // HLSL Change - no support for modules
//case GeneratePCH: return new GeneratePCHAction; // HLSL Change - no support for PCH
case GeneratePTH: return new GeneratePTHAction();
case InitOnly: return new InitOnlyAction();
case ParseSyntaxOnly: return new SyntaxOnlyAction();
//case ModuleFileInfo: return new DumpModuleInfoAction(); // HLSL Change - no support for modules
//case VerifyPCH: return new VerifyPCHAction(); // HLSL Change - no support for PCH
case PluginAction: {
for (FrontendPluginRegistry::iterator it =
FrontendPluginRegistry::begin(), ie = FrontendPluginRegistry::end();
it != ie; ++it) {
if (it->getName() == CI.getFrontendOpts().ActionName) {
std::unique_ptr<PluginASTAction> P(it->instantiate());
if (!P->ParseArgs(CI, CI.getFrontendOpts().PluginArgs))
return nullptr;
return P.release();
}
}
CI.getDiagnostics().Report(diag::err_fe_invalid_plugin_name)
<< CI.getFrontendOpts().ActionName;
return nullptr;
}
case PrintDeclContext: return new DeclContextPrintAction();
case PrintPreamble: return new PrintPreambleAction();
case PrintPreprocessedInput: {
if (CI.getPreprocessorOutputOpts().RewriteIncludes)
return new RewriteIncludesAction();
return new PrintPreprocessedAction();
}
case RewriteMacros: return new RewriteMacrosAction();
case RewriteTest: return new RewriteTestAction();
#ifdef CLANG_ENABLE_OBJC_REWRITER
case RewriteObjC: return new RewriteObjCAction();
#else
case RewriteObjC: Action = "RewriteObjC"; break;
#endif
#ifdef CLANG_ENABLE_ARCMT
case MigrateSource: return new arcmt::MigrateSourceAction();
#else
case MigrateSource: Action = "MigrateSource"; break;
#endif
#ifdef CLANG_ENABLE_STATIC_ANALYZER
case RunAnalysis: return new ento::AnalysisAction();
#else
case RunAnalysis: Action = "RunAnalysis"; break;
#endif
case RunPreprocessorOnly: return new PreprocessOnlyAction();
}
#if !defined(CLANG_ENABLE_ARCMT) || !defined(CLANG_ENABLE_STATIC_ANALYZER) \
|| !defined(CLANG_ENABLE_OBJC_REWRITER)
CI.getDiagnostics().Report(diag::err_fe_action_not_available) << Action;
  return nullptr;
#else
llvm_unreachable("Invalid program action!");
#endif
}
static FrontendAction *CreateFrontendAction(CompilerInstance &CI) {
// Create the underlying action.
FrontendAction *Act = CreateFrontendBaseAction(CI);
if (!Act)
return nullptr;
const FrontendOptions &FEOpts = CI.getFrontendOpts();
if (FEOpts.FixAndRecompile) {
Act = new FixItRecompile(Act);
}
#ifdef CLANG_ENABLE_ARCMT
if (CI.getFrontendOpts().ProgramAction != frontend::MigrateSource &&
CI.getFrontendOpts().ProgramAction != frontend::GeneratePCH) {
// Potentially wrap the base FE action in an ARC Migrate Tool action.
switch (FEOpts.ARCMTAction) {
case FrontendOptions::ARCMT_None:
break;
case FrontendOptions::ARCMT_Check:
Act = new arcmt::CheckAction(Act);
break;
case FrontendOptions::ARCMT_Modify:
Act = new arcmt::ModifyAction(Act);
break;
case FrontendOptions::ARCMT_Migrate:
Act = new arcmt::MigrateAction(Act,
FEOpts.MTMigrateDir,
FEOpts.ARCMTMigrateReportOut,
FEOpts.ARCMTMigrateEmitARCErrors);
break;
}
if (FEOpts.ObjCMTAction != FrontendOptions::ObjCMT_None) {
Act = new arcmt::ObjCMigrateAction(Act, FEOpts.MTMigrateDir,
FEOpts.ObjCMTAction);
}
}
#endif
#if 0 // HLSL Change Starts - no support for AST serialization/deserialization
// If there are any AST files to merge, create a frontend action
// adaptor to perform the merge.
if (!FEOpts.ASTMergeFiles.empty())
Act = new ASTMergeAction(Act, FEOpts.ASTMergeFiles);
#endif // HLSL Change Ends - no support for AST serialization/deserialization
return Act;
}
bool clang::ExecuteCompilerInvocation(CompilerInstance *Clang) {
// Honor -help.
if (Clang->getFrontendOpts().ShowHelp) {
std::unique_ptr<OptTable> Opts(driver::createDriverOptTable());
Opts->PrintHelp(llvm::outs(), "clang -cc1",
"LLVM 'Clang' Compiler: http://clang.llvm.org", "",
/*Include=*/ driver::options::CC1Option, /*Exclude=*/ 0);
return true;
}
// Honor -version.
//
// FIXME: Use a better -version message?
if (Clang->getFrontendOpts().ShowVersion) {
llvm::cl::PrintVersionMessage();
return true;
}
// Load any requested plugins.
for (unsigned i = 0,
e = Clang->getFrontendOpts().Plugins.size(); i != e; ++i) {
const std::string &Path = Clang->getFrontendOpts().Plugins[i];
std::string Error;
if (llvm::sys::DynamicLibrary::LoadLibraryPermanently(Path.c_str(), &Error))
Clang->getDiagnostics().Report(diag::err_fe_unable_to_load_plugin)
<< Path << Error;
}
// Honor -mllvm.
//
// FIXME: Remove this, one day.
// This should happen AFTER plugins have been loaded!
if (!Clang->getFrontendOpts().LLVMArgs.empty()) {
unsigned NumArgs = Clang->getFrontendOpts().LLVMArgs.size();
auto Args = llvm::make_unique<const char*[]>(NumArgs + 2);
Args[0] = "clang (LLVM option parsing)";
for (unsigned i = 0; i != NumArgs; ++i)
Args[i + 1] = Clang->getFrontendOpts().LLVMArgs[i].c_str();
Args[NumArgs + 1] = nullptr;
llvm::cl::ParseCommandLineOptions(NumArgs + 1, Args.get());
}
#ifdef CLANG_ENABLE_STATIC_ANALYZER
// Honor -analyzer-checker-help.
// This should happen AFTER plugins have been loaded!
if (Clang->getAnalyzerOpts()->ShowCheckerHelp) {
ento::printCheckerHelp(llvm::outs(), Clang->getFrontendOpts().Plugins);
return true;
}
#endif
// If there were errors in processing arguments, don't do anything else.
if (Clang->getDiagnostics().hasErrorOccurred())
return false;
// Create and execute the frontend action.
std::unique_ptr<FrontendAction> Act(CreateFrontendAction(*Clang));
if (!Act)
return false;
bool Success = Clang->ExecuteAction(*Act);
if (Clang->getFrontendOpts().DisableFree)
BuryPointer(std::move(Act));
return Success;
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/RawBufferMethods.h | //===------ RawBufferMethods.h ---- Raw Buffer Methods ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SPIRV_RAWBUFFERMETHODS_H
#define LLVM_CLANG_SPIRV_RAWBUFFERMETHODS_H
class ASTContext;
class SpirvBuilder;
class SpirvInstruction;
#include "SpirvEmitter.h"
namespace clang {
namespace spirv {
class RawBufferHandler {
public:
RawBufferHandler(SpirvEmitter &emitter)
: theEmitter(emitter), astContext(emitter.getASTContext()),
spvBuilder(emitter.getSpirvBuilder()) {}
/// \brief Performs (RW)ByteAddressBuffer.Load<T>(byteAddress).
/// (RW)ByteAddressBuffers are represented as structs with only one member
/// which is a runtime array in SPIR-V. This method works by loading one or
/// more uints, and performing necessary casts and composite constructions
/// to build the 'targetType'. The layout rule for the result will be `Void`
/// because the value will be built and used internally only. It does not have
/// to match `buffer`.
///
/// Example:
/// targetType = uint16_t, byteAddress=0
/// --> Load the first 16-bit uint starting at byte address 0.
SpirvInstruction *processTemplatedLoadFromBuffer(
SpirvInstruction *buffer, SpirvInstruction *byteAddress,
const QualType targetType, SourceRange range = {});
/// \brief Performs RWByteAddressBuffer.Store<T>(address, value).
/// RWByteAddressBuffers are represented in SPIR-V as structs with only one
/// member which is a runtime array of uints. This method works by decomposing
/// the given |value| to reach numeric/bool types. Then performs necessary
/// casts to uints and stores them in the underlying runtime array.
///
/// Example:
/// targetType = uint16_t, address=0
/// --> Store to the first 16-bit uint starting at address 0.
void processTemplatedStoreToBuffer(SpirvInstruction *value,
SpirvInstruction *buffer,
SpirvInstruction *&byteAddress,
const QualType valueType,
SourceRange range = {});
private:
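  /// \brief Tracks the current read/write position in the buffer, both as a
  /// byte address and, computed lazily, as an index into the underlying
  /// runtime array of 32-bit uints (assumed to correspond to byteAddress / 4).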
class BufferAddress {
public:
BufferAddress(SpirvInstruction *&byteAddress, SpirvEmitter &emitter)
: byteAddress(byteAddress), wordIndex(),
spvBuilder(emitter.getSpirvBuilder()),
astContext(emitter.getASTContext()) {}
SpirvInstruction *getByteAddress();
SpirvInstruction *getWordIndex(SourceLocation loc, SourceRange range);
void incrementByteAddress(SpirvInstruction *width, SourceLocation loc,
SourceRange range);
void incrementByteAddress(uint32_t width, SourceLocation loc,
SourceRange range);
void incrementWordIndex(SourceLocation loc, SourceRange range);
private:
SpirvInstruction *byteAddress;
llvm::Optional<SpirvInstruction *> wordIndex;
SpirvBuilder &spvBuilder;
ASTContext &astContext;
};
SpirvInstruction *processTemplatedLoadFromBuffer(SpirvInstruction *buffer,
BufferAddress &address,
const QualType targetType,
SourceRange range = {});
void processTemplatedStoreToBuffer(SpirvInstruction *value,
SpirvInstruction *buffer,
BufferAddress &address,
const QualType valueType,
SourceRange range = {});
SpirvInstruction *load16Bits(SpirvInstruction *buffer, BufferAddress &address,
QualType target16BitType,
SourceRange range = {});
SpirvInstruction *load32Bits(SpirvInstruction *buffer, BufferAddress &address,
QualType target32BitType,
SourceRange range = {});
SpirvInstruction *load64Bits(SpirvInstruction *buffer, BufferAddress &address,
QualType target64BitType,
SourceRange range = {});
private:
void store16Bits(SpirvInstruction *value, SpirvInstruction *buffer,
BufferAddress &address, const QualType valueType,
SourceRange range = {});
void store32Bits(SpirvInstruction *value, SpirvInstruction *buffer,
BufferAddress &address, const QualType valueType,
SourceRange range = {});
void store64Bits(SpirvInstruction *value, SpirvInstruction *buffer,
BufferAddress &address, const QualType valueType,
SourceRange range = {});
/// \brief Serializes the given values into their components until a scalar or
/// a struct has been reached. Returns the most basic type it reaches.
QualType serializeToScalarsOrStruct(std::deque<SpirvInstruction *> *values,
QualType valueType, SourceLocation,
SourceRange range = {});
private:
/// \brief Performs an OpBitCast from |fromType| to |toType| on the given
/// instruction.
///
/// If the |toType| is a boolean type, it performs a regular type cast.
///
  /// If the |fromType| and |toType| are the same, does nothing and returns
  /// the given instruction.
SpirvInstruction *bitCastToNumericalOrBool(SpirvInstruction *instr,
QualType fromType, QualType toType,
SourceLocation loc,
SourceRange range = {});
private:
SpirvEmitter &theEmitter;
ASTContext &astContext;
SpirvBuilder &spvBuilder;
};
} // namespace spirv
} // namespace clang
#endif // LLVM_CLANG_SPIRV_RAWBUFFERMETHODS_H
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/SpirvBasicBlock.cpp | //===--- SpirvBasicBlock.cpp - SPIR-V Basic Block Implementation -*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/SPIRV/SpirvBasicBlock.h"
#include "clang/SPIRV/SpirvVisitor.h"
namespace clang {
namespace spirv {
SpirvBasicBlock::SpirvBasicBlock(llvm::StringRef name)
: labelId(0), labelName(name), mergeTarget(nullptr),
continueTarget(nullptr), debugScope(nullptr) {}
SpirvBasicBlock::~SpirvBasicBlock() {
for (auto instructionNode : instructions)
instructionNode.instruction->releaseMemory();
if (debugScope)
debugScope->releaseMemory();
}
bool SpirvBasicBlock::hasTerminator() const {
return !instructions.empty() &&
isa<SpirvTerminator>(instructions.back().instruction);
}
bool SpirvBasicBlock::invokeVisitor(
Visitor *visitor, llvm::ArrayRef<SpirvVariable *> vars,
SpirvDebugScope *functionScope,
llvm::ArrayRef<SpirvDebugDeclare *> debugDeclares, bool reverseOrder) {
if (!visitor->visit(this, Visitor::Phase::Init))
return false;
const bool debugInfoVulkan = visitor->getCodeGenOptions().debugInfoVulkan;
// OpenCL.DebugInfo.100 allows instructions outside of basic blocks, so we can
// emit the scope early here.
if (!debugInfoVulkan) {
if (debugScope && !visitor->visit(debugScope))
return false;
    // Note: when emitting OpenCL.DebugInfo.100 we do not expect to have a
    // functionScope to emit, nor any debugDeclares. These should have been
    // emitted in the parent SpirvFunction; see SpirvFunction::invokeVisitor().
assert(functionScope == nullptr &&
"Expected no functionScope when emitting OpenCL.DebugInfo.100");
assert(debugDeclares.empty() &&
"Expected no debugDeclares when emitting OpenCL.DebugInfo.100");
}
if (reverseOrder) {
for (auto iter = instructions.rbegin(); iter != instructions.rend();
++iter) {
if (!iter->instruction->invokeVisitor(visitor))
return false;
}
    // For NonSemantic.Shader.DebugInfo.100, emit the block's scope only if we
    // didn't have a function scope, or if the scopes are different. This way
    // we don't emit redundant scopes.
if (debugInfoVulkan) {
if (debugScope && (!functionScope ||
functionScope->getScope() != debugScope->getScope())) {
if (!visitor->visit(debugScope))
return false;
}
}
if (!debugDeclares.empty()) {
for (auto decl = debugDeclares.rbegin(); decl != debugDeclares.rend();
++decl) {
if (!(*decl)->invokeVisitor(visitor))
return false;
}
}
if (functionScope && !visitor->visit(functionScope))
return false;
// If a basic block is the first basic block of a function, it should
// include all the variables of the function.
if (!vars.empty()) {
for (auto var = vars.rbegin(); var != vars.rend(); ++var) {
if (!(*var)->invokeVisitor(visitor))
return false;
}
}
} else {
// If a basic block is the first basic block of a function, it should
// include all the variables of the function.
if (!vars.empty()) {
for (auto *var : vars) {
if (!var->invokeVisitor(visitor))
return false;
}
}
if (functionScope && !visitor->visit(functionScope))
return false;
if (!debugDeclares.empty()) {
for (auto *decl : debugDeclares) {
if (!decl->invokeVisitor(visitor))
return false;
}
}
    // For NonSemantic.Shader.DebugInfo.100, emit the block's scope only if we
    // didn't have a function scope, or if the scopes are different. This way
    // we don't emit redundant scopes.
if (debugInfoVulkan) {
if (debugScope && (!functionScope ||
functionScope->getScope() != debugScope->getScope())) {
if (!visitor->visit(debugScope))
return false;
}
}
for (auto iter = instructions.begin(); iter != instructions.end(); ++iter) {
if (!iter->instruction->invokeVisitor(visitor))
return false;
}
}
if (!visitor->visit(this, Visitor::Phase::Done))
return false;
return true;
}
void SpirvBasicBlock::addSuccessor(SpirvBasicBlock *bb) {
assert(bb && "cannot add null basic block as successor");
successors.push_back(bb);
}
} // end namespace spirv
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/RemoveBufferBlockVisitor.cpp | //===-- RemoveBufferBlockVisitor.cpp - RemoveBufferBlock Visitor -*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "RemoveBufferBlockVisitor.h"
#include "clang/SPIRV/SpirvContext.h"
#include "clang/SPIRV/SpirvFunction.h"
namespace clang {
namespace spirv {
bool RemoveBufferBlockVisitor::isBufferBlockDecorationAvailable() {
return !featureManager.isTargetEnvSpirv1p4OrAbove();
}
bool RemoveBufferBlockVisitor::visit(SpirvModule *mod, Phase phase) {
  // The BufferBlock decoration is only allowed in SPIR-V 1.3 or earlier. When
  // targeting SPIR-V 1.4 or above, it must be removed from the module;
  // otherwise the decoration is still valid and this IMR visitor has nothing
  // to do.
if (phase == Visitor::Phase::Init)
if (isBufferBlockDecorationAvailable())
return false;
return true;
}
bool RemoveBufferBlockVisitor::hasStorageBufferInterfaceType(
const SpirvType *type) {
while (type != nullptr) {
if (const auto *structType = dyn_cast<StructType>(type)) {
return structType->getInterfaceType() ==
StructInterfaceType::StorageBuffer;
} else if (const auto *elemType = dyn_cast<ArrayType>(type)) {
type = elemType->getElementType();
} else if (const auto *elemType = dyn_cast<RuntimeArrayType>(type)) {
type = elemType->getElementType();
} else {
return false;
}
}
return false;
}
bool RemoveBufferBlockVisitor::visitInstruction(SpirvInstruction *inst) {
if (!inst->getResultType())
return true;
// OpAccessChain can obtain pointers to any type. Its result type is
// OpTypePointer, and it should get the same storage class as its base.
if (auto *accessChain = dyn_cast<SpirvAccessChain>(inst)) {
auto *accessChainType = accessChain->getResultType();
auto *baseType = accessChain->getBase()->getResultType();
// The result type of OpAccessChain and the result type of its base must be
// OpTypePointer.
assert(isa<SpirvPointerType>(accessChainType));
assert(isa<SpirvPointerType>(baseType));
auto *accessChainPtr = dyn_cast<SpirvPointerType>(accessChainType);
auto *basePtr = dyn_cast<SpirvPointerType>(baseType);
auto baseStorageClass = basePtr->getStorageClass();
if (accessChainPtr->getStorageClass() != baseStorageClass) {
auto *newAccessChainType = context.getPointerType(
accessChainPtr->getPointeeType(), baseStorageClass);
inst->setStorageClass(baseStorageClass);
inst->setResultType(newAccessChainType);
}
}
// For all instructions, if the result type is a pointer pointing to a struct
// with StorageBuffer interface, the storage class must be updated.
const auto *instType = inst->getResultType();
const auto *newInstType = instType;
spv::StorageClass newInstStorageClass = spv::StorageClass::Max;
if (updateStorageClass(instType, &newInstType, &newInstStorageClass)) {
inst->setResultType(newInstType);
inst->setStorageClass(newInstStorageClass);
}
return true;
}
bool RemoveBufferBlockVisitor::updateStorageClass(
const SpirvType *type, const SpirvType **newType,
spv::StorageClass *newStorageClass) {
// Update pointer types.
if (const auto *ptrType = dyn_cast<SpirvPointerType>(type)) {
const auto *innerType = ptrType->getPointeeType();
// For pointees with storage buffer interface, update pointer storage class.
if (hasStorageBufferInterfaceType(innerType) &&
ptrType->getStorageClass() != spv::StorageClass::StorageBuffer) {
*newType =
context.getPointerType(innerType, spv::StorageClass::StorageBuffer);
*newStorageClass = spv::StorageClass::StorageBuffer;
return true;
}
// Update storage class of pointee, if applicable.
const auto *newInnerType = innerType;
spv::StorageClass newInnerSC = spv::StorageClass::Max;
if (updateStorageClass(innerType, &newInnerType, &newInnerSC)) {
*newType =
context.getPointerType(newInnerType, ptrType->getStorageClass());
*newStorageClass = ptrType->getStorageClass();
return true;
}
}
// Update struct types.
if (const auto *structType = dyn_cast<StructType>(type)) {
bool transformed = false;
llvm::SmallVector<StructType::FieldInfo, 2> newFields;
// Update storage class of each field, if applicable.
for (auto field : structType->getFields()) {
const auto *newFieldType = field.type;
spv::StorageClass newFieldSC = spv::StorageClass::Max;
transformed |= updateStorageClass(field.type, &newFieldType, &newFieldSC);
field.type = newFieldType;
newFields.push_back(field);
}
*newType =
context.getStructType(llvm::ArrayRef<StructType::FieldInfo>(newFields),
structType->getStructName());
*newStorageClass = spv::StorageClass::StorageBuffer;
return transformed;
}
// TODO: Handle other composite types.
return false;
}
bool RemoveBufferBlockVisitor::visit(SpirvFunction *fn, Phase phase) {
if (phase == Visitor::Phase::Init) {
llvm::SmallVector<const SpirvType *, 4> paramTypes;
bool updatedParamTypes = false;
for (auto *param : fn->getParameters()) {
const auto *paramType = param->getResultType();
// This pass is run after all types are lowered.
assert(paramType != nullptr);
// Update the parameter type if needed (update storage class of pointers).
const auto *newParamType = paramType;
spv::StorageClass newParamSC = spv::StorageClass::Max;
if (updateStorageClass(paramType, &newParamType, &newParamSC)) {
param->setStorageClass(newParamSC);
param->setResultType(newParamType);
updatedParamTypes = true;
}
paramTypes.push_back(newParamType);
}
// Update the return type if needed (update storage class of pointers).
const auto *returnType = fn->getReturnType();
const auto *newReturnType = returnType;
spv::StorageClass newReturnSC = spv::StorageClass::Max;
bool updatedReturnType =
updateStorageClass(returnType, &newReturnType, &newReturnSC);
if (updatedReturnType) {
fn->setReturnType(newReturnType);
}
if (updatedParamTypes || updatedReturnType) {
fn->setFunctionType(context.getFunctionType(newReturnType, paramTypes));
}
return true;
}
return true;
}
} // end namespace spirv
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/RemoveBufferBlockVisitor.h | //===--- RemoveBufferBlockVisitor.h - RemoveBufferBlock Visitor --*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_SPIRV_REMOVEBUFFERBLOCKVISITOR_H
#define LLVM_CLANG_LIB_SPIRV_REMOVEBUFFERBLOCKVISITOR_H
#include "clang/AST/ASTContext.h"
#include "clang/SPIRV/FeatureManager.h"
#include "clang/SPIRV/SpirvVisitor.h"
namespace clang {
namespace spirv {
class SpirvContext;
class RemoveBufferBlockVisitor : public Visitor {
public:
RemoveBufferBlockVisitor(ASTContext &astCtx, SpirvContext &spvCtx,
const SpirvCodeGenOptions &opts,
FeatureManager &featureMgr)
: Visitor(opts, spvCtx), featureManager(featureMgr) {}
bool visit(SpirvModule *, Phase) override;
bool visit(SpirvFunction *, Phase) override;
using Visitor::visit;
/// The "sink" visit function for all instructions.
///
  /// By default, all other visit functions redirect to this visit function.
  /// Override this visit function if you want to handle all instructions in a
  /// uniform manner, regardless of their concrete type.
bool visitInstruction(SpirvInstruction *instr) override;
private:
/// Returns true if |type| is a SPIR-V type whose interface type is
/// StorageBuffer.
bool hasStorageBufferInterfaceType(const SpirvType *type);
/// Returns true if the BufferBlock decoration is available (SPIR-V 1.3
/// or below).
bool isBufferBlockDecorationAvailable();
/// Transforms the given |type| if it is one of the following cases:
///
/// 1- a pointer to a structure with StorageBuffer interface
/// 2- a pointer to a pointer to a structure with StorageBuffer interface
/// 3- a pointer to a struct containing a structure with StorageBuffer
/// interface
///
/// by updating the storage class of the pointer whose pointee is the struct.
///
/// Example of case (1):
/// type: _ptr_Uniform_StructuredBuffer_float
/// new type: _ptr_StorageBuffer_StructuredBuffer_float
/// new storage class: StorageBuffer
///
/// Example of case (2):
/// type: _ptr_Function__ptr_Uniform_StructuredBuffer_float
/// new type: _ptr_Function__ptr_StorageBuffer_StructuredBuffer_float
/// new storage class: Function
///
/// Example of case (3):
/// type: _ptr_Function_Struct
/// where %Struct = OpTypeStruct
/// %_ptr_Uniform_type_StructuredBuffer_float
/// new type: _ptr_Function_Struct
/// where %Struct = OpTypeStruct
/// %_ptr_StorageBuffer_type_StructuredBuffer_float
/// new storage class: Function
///
/// If |type| is transformed, the |newType| and |newStorageClass| are
/// returned by reference and the function returns true.
///
/// If |type| is not transformed, |newType| and |newStorageClass| are
/// untouched, and the function returns false.
bool updateStorageClass(const SpirvType *type, const SpirvType **newType,
spv::StorageClass *newStorageClass);
FeatureManager featureManager;
};
} // end namespace spirv
} // end namespace clang
#endif // LLVM_CLANG_LIB_SPIRV_REMOVEBUFFERBLOCKVISITOR_H
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/SpirvType.cpp | //===-- SpirvType.cpp - SPIR-V Type Hierarchy -------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//
//
// This file implements the in-memory representation of SPIR-V types.
//
//===----------------------------------------------------------------------===//
#include "clang/SPIRV/SpirvType.h"
#include "clang/SPIRV/SpirvInstruction.h"
#include <sstream>
namespace clang {
namespace spirv {
bool ScalarType::classof(const SpirvType *t) {
switch (t->getKind()) {
case TK_Bool:
case TK_Integer:
case TK_Float:
return true;
default:
break;
}
return false;
}
bool SpirvType::isTexture(const SpirvType *type) {
if (const auto *imageType = dyn_cast<ImageType>(type)) {
const auto dim = imageType->getDimension();
const auto withSampler = imageType->withSampler();
return (withSampler == ImageType::WithSampler::Yes) &&
(dim == spv::Dim::Dim1D || dim == spv::Dim::Dim2D ||
dim == spv::Dim::Dim3D || dim == spv::Dim::Cube);
}
return false;
}
bool SpirvType::isRWTexture(const SpirvType *type) {
if (const auto *imageType = dyn_cast<ImageType>(type)) {
const auto dim = imageType->getDimension();
const auto withSampler = imageType->withSampler();
return (withSampler == ImageType::WithSampler::No) &&
(dim == spv::Dim::Dim1D || dim == spv::Dim::Dim2D ||
dim == spv::Dim::Dim3D);
}
return false;
}
bool SpirvType::isSampler(const SpirvType *type) {
return isa<SamplerType>(type);
}
bool SpirvType::isBuffer(const SpirvType *type) {
if (const auto *imageType = dyn_cast<ImageType>(type)) {
return imageType->getDimension() == spv::Dim::Buffer &&
imageType->withSampler() == ImageType::WithSampler::Yes;
}
return false;
}
bool SpirvType::isRWBuffer(const SpirvType *type) {
if (const auto *imageType = dyn_cast<ImageType>(type)) {
return imageType->getDimension() == spv::Dim::Buffer &&
imageType->withSampler() == ImageType::WithSampler::No;
}
return false;
}
bool SpirvType::isSubpassInput(const SpirvType *type) {
if (const auto *imageType = dyn_cast<ImageType>(type)) {
return imageType->getDimension() == spv::Dim::SubpassData &&
imageType->isMSImage() == false;
}
return false;
}
bool SpirvType::isSubpassInputMS(const SpirvType *type) {
if (const auto *imageType = dyn_cast<ImageType>(type)) {
return imageType->getDimension() == spv::Dim::SubpassData &&
imageType->isMSImage() == true;
}
return false;
}
bool SpirvType::isResourceType(const SpirvType *type) {
if (isa<ImageType>(type) || isa<SamplerType>(type) ||
isa<AccelerationStructureTypeNV>(type))
return true;
if (const auto *structType = dyn_cast<StructType>(type))
return structType->getInterfaceType() !=
StructInterfaceType::InternalStorage;
if (const auto *pointerType = dyn_cast<SpirvPointerType>(type))
return isResourceType(pointerType->getPointeeType());
return false;
}
MatrixType::MatrixType(const VectorType *vecType, uint32_t vecCount)
: SpirvType(TK_Matrix), vectorType(vecType), vectorCount(vecCount) {}
bool MatrixType::operator==(const MatrixType &that) const {
return vectorType == that.vectorType && vectorCount == that.vectorCount;
}
ImageType::ImageType(const NumericalType *type, spv::Dim dim, WithDepth depth,
bool arrayed, bool ms, WithSampler sampled,
spv::ImageFormat format)
: SpirvType(TK_Image, getImageName(dim, arrayed)), sampledType(type),
dimension(dim), imageDepth(depth), isArrayed(arrayed), isMultiSampled(ms),
isSampled(sampled), imageFormat(format) {}
std::string ImageType::getImageName(spv::Dim dim, bool arrayed) {
const char *dimStr = "";
switch (dim) {
case spv::Dim::Dim1D:
dimStr = "1d.";
break;
case spv::Dim::Dim2D:
dimStr = "2d.";
break;
case spv::Dim::Dim3D:
dimStr = "3d.";
break;
case spv::Dim::Cube:
dimStr = "cube.";
break;
case spv::Dim::Rect:
dimStr = "rect.";
break;
case spv::Dim::Buffer:
dimStr = "buffer.";
break;
case spv::Dim::SubpassData:
dimStr = "subpass.";
break;
default:
break;
}
std::ostringstream name;
name << "type." << dimStr << "image" << (arrayed ? ".array" : "");
return name.str();
}
bool ImageType::operator==(const ImageType &that) const {
return sampledType == that.sampledType && dimension == that.dimension &&
imageDepth == that.imageDepth && isArrayed == that.isArrayed &&
isMultiSampled == that.isMultiSampled && isSampled == that.isSampled &&
imageFormat == that.imageFormat;
}
bool ArrayType::operator==(const ArrayType &that) const {
return elementType == that.elementType && elementCount == that.elementCount &&
stride.hasValue() == that.stride.hasValue() &&
(!stride.hasValue() || stride.getValue() == that.stride.getValue());
}
bool RuntimeArrayType::operator==(const RuntimeArrayType &that) const {
return elementType == that.elementType &&
stride.hasValue() == that.stride.hasValue() &&
(!stride.hasValue() || stride.getValue() == that.stride.getValue());
}
bool SpvIntrinsicTypeOperand::operator==(
const SpvIntrinsicTypeOperand &that) const {
if (isTypeOperand != that.isTypeOperand)
return false;
if (isTypeOperand) {
return operand_as_type == that.operand_as_type;
} else {
auto constantInst = dyn_cast<SpirvConstant>(operand_as_inst);
assert(constantInst != nullptr);
auto thatConstantInst = dyn_cast<SpirvConstant>(that.operand_as_inst);
assert(thatConstantInst != nullptr);
return *constantInst == *thatConstantInst;
}
}
SpirvIntrinsicType::SpirvIntrinsicType(
unsigned typeOp, llvm::ArrayRef<SpvIntrinsicTypeOperand> inOps)
: SpirvType(TK_SpirvIntrinsicType, "spirvIntrinsicType"),
typeOpCode(typeOp), operands(inOps.begin(), inOps.end()) {}
StructType::StructType(llvm::ArrayRef<StructType::FieldInfo> fieldsVec,
llvm::StringRef name, bool isReadOnly,
StructInterfaceType iface)
: SpirvType(TK_Struct, name), fields(fieldsVec.begin(), fieldsVec.end()),
readOnly(isReadOnly), interfaceType(iface) {}
bool StructType::FieldInfo::operator==(
const StructType::FieldInfo &that) const {
return type == that.type && offset.hasValue() == that.offset.hasValue() &&
matrixStride.hasValue() == that.matrixStride.hasValue() &&
isRowMajor.hasValue() == that.isRowMajor.hasValue() &&
name == that.name &&
// Either not have offset value, or have the same value
(!offset.hasValue() || offset.getValue() == that.offset.getValue()) &&
// Either not have matrix stride value, or have the same value
(!matrixStride.hasValue() ||
matrixStride.getValue() == that.matrixStride.getValue()) &&
// Either not have row major value, or have the same value
(!isRowMajor.hasValue() ||
isRowMajor.getValue() == that.isRowMajor.getValue()) &&
// Both should have the same precision
isRelaxedPrecision == that.isRelaxedPrecision &&
// Both fields should be precise or not precise
isPrecise == that.isPrecise;
}
bool StructType::operator==(const StructType &that) const {
return fields == that.fields && getName() == that.getName() &&
readOnly == that.readOnly && interfaceType == that.interfaceType;
}
HybridStructType::HybridStructType(
llvm::ArrayRef<HybridStructType::FieldInfo> fieldsVec, llvm::StringRef name,
bool isReadOnly, StructInterfaceType iface)
: HybridType(TK_HybridStruct, name),
fields(fieldsVec.begin(), fieldsVec.end()), readOnly(isReadOnly),
interfaceType(iface) {}
FunctionType::FunctionType(const SpirvType *ret,
llvm::ArrayRef<const SpirvType *> param)
: SpirvType(TK_Function), returnType(ret),
paramTypes(param.begin(), param.end()) {
  // Make sure the return type and all parameter types have already been
  // lowered; no hybrid types may appear in a function type.
assert(!isa<HybridType>(ret));
for (auto *paramType : param) {
(void)paramType;
assert(!isa<HybridType>(paramType));
}
}
} // namespace spirv
} // namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/DebugTypeVisitor.cpp | //===--- DebugTypeVisitor.cpp - SPIR-V type to debug type impl ---*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include <sstream>
#include "DebugTypeVisitor.h"
#include "LowerTypeVisitor.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/SPIRV/SpirvBuilder.h"
#include "clang/SPIRV/SpirvModule.h"
namespace clang {
namespace spirv {
void addTemplateTypeAndItsParamsToModule(SpirvModule *module,
SpirvDebugTypeTemplate *tempType) {
assert(module && "module is nullptr");
assert(tempType && "tempType is nullptr");
for (auto *param : tempType->getParams()) {
module->addDebugInfo(param);
}
module->addDebugInfo(tempType);
}
void DebugTypeVisitor::setDefaultDebugInfo(SpirvDebugInstruction *instr) {
instr->setAstResultType(astContext.VoidTy);
instr->setResultType(context.getVoidType());
instr->setInstructionSet(
spvBuilder.getDebugInfoExtInstSet(spvOptions.debugInfoVulkan));
}
SpirvDebugInfoNone *DebugTypeVisitor::getDebugInfoNone() {
auto *debugNone = spvBuilder.getOrCreateDebugInfoNone();
setDefaultDebugInfo(debugNone);
return debugNone;
}
SpirvDebugTypeComposite *DebugTypeVisitor::createDebugTypeComposite(
const SpirvType *type, const SourceLocation &loc, uint32_t tag) {
const auto &sm = astContext.getSourceManager();
uint32_t line = sm.getPresumedLineNumber(loc);
uint32_t column = sm.getPresumedColumnNumber(loc);
StringRef linkageName = type->getName();
// TODO: Update linkageName using astContext.createMangleContext().
std::string name = type->getName();
RichDebugInfo *debugInfo = &spvContext.getDebugInfo().begin()->second;
const char *file = sm.getPresumedLoc(loc).getFilename();
if (file) {
auto &debugInfoMap = spvContext.getDebugInfo();
auto it = debugInfoMap.find(file);
if (it != debugInfoMap.end()) {
debugInfo = &it->second;
} else {
auto *dbgSrc = spvBuilder.createDebugSource(file);
setDefaultDebugInfo(dbgSrc);
auto dbgCompUnit = spvBuilder.createDebugCompilationUnit(dbgSrc);
setDefaultDebugInfo(dbgCompUnit);
debugInfo =
&debugInfoMap.insert({file, RichDebugInfo(dbgSrc, dbgCompUnit)})
.first->second;
}
}
return spvContext.getDebugTypeComposite(
type, name, debugInfo->source, line, column,
/* parent */ debugInfo->compilationUnit, linkageName, 3u, tag);
}
void DebugTypeVisitor::addDebugTypeForMemberVariables(
SpirvDebugTypeComposite *debugTypeComposite, const StructType *type,
llvm::function_ref<SourceLocation()> location, unsigned numBases) {
llvm::SmallVector<SpirvDebugInstruction *, 4> members;
uint32_t compositeSizeInBits = 0;
const auto &sm = astContext.getSourceManager();
for (auto &field : type->getFields()) {
// Skip base classes
// TODO: Handle class inheritance correctly.
if (numBases != 0) {
--numBases;
continue;
}
// Lower this member's debug type.
auto *memberDebugType = lowerToDebugType(field.type);
    // TODO: How to handle a variable type with an unknown physical layout is
    // still under discussion. Add proper flags or operations for variables
    // with an unknown physical layout. For example, a local variable has no
    // physical layout.
// Get offset (in bits) of this member within the composite.
uint32_t offsetInBits =
field.offset.hasValue() ? *field.offset * 8 : compositeSizeInBits;
// Get size (in bits) of this member within the composite.
uint32_t sizeInBits = field.sizeInBytes.hasValue()
? *field.sizeInBytes * 8
: memberDebugType->getSizeInBits();
const SourceLocation loc = location();
uint32_t line = sm.getPresumedLineNumber(loc);
uint32_t column = sm.getPresumedColumnNumber(loc);
// TODO: Replace 2u and 3u with valid flags when debug info extension is
// placed in SPIRV-Header.
auto *debugInstr = spvContext.getDebugTypeMember(
field.name, memberDebugType, debugTypeComposite->getSource(), line,
column, debugTypeComposite,
/* flags */ 3u, offsetInBits, sizeInBits, /* value */ nullptr);
assert(debugInstr);
setDefaultDebugInfo(debugInstr);
members.push_back(debugInstr);
compositeSizeInBits = offsetInBits + sizeInBits;
}
debugTypeComposite->setMembers(members);
debugTypeComposite->setSizeInBits(compositeSizeInBits);
}
void DebugTypeVisitor::lowerDebugTypeMembers(
SpirvDebugTypeComposite *debugTypeComposite, const StructType *type,
const DeclContext *decl) {
if (const auto *recordDecl = dyn_cast<RecordDecl>(decl)) {
auto fieldIter = recordDecl->field_begin();
auto fieldEnd = recordDecl->field_end();
unsigned numBases = 0;
if (const auto *cxxRecordDecl = dyn_cast<CXXRecordDecl>(recordDecl))
numBases = cxxRecordDecl->getNumBases();
addDebugTypeForMemberVariables(
debugTypeComposite, type,
[&fieldIter, &fieldEnd]() {
assert(fieldIter != fieldEnd);
(void)fieldEnd;
auto location = fieldIter->getLocation();
++fieldIter;
return location;
},
numBases);
} else if (const auto *hlslBufferDecl = dyn_cast<HLSLBufferDecl>(decl)) {
auto subDeclIter = hlslBufferDecl->decls_begin();
auto subDeclEnd = hlslBufferDecl->decls_end();
addDebugTypeForMemberVariables(
debugTypeComposite, type,
[&subDeclIter, &subDeclEnd]() {
assert(subDeclIter != subDeclEnd);
(void)subDeclEnd;
auto location = subDeclIter->getLocation();
++subDeclIter;
return location;
},
0);
} else {
assert(false && "Uknown DeclContext for DebugTypeMember generation");
}
// Note:
// Generating forward references is possible for non-semantic debug info,
// but not when using OpenCL.DebugInfo.100.
// Doing so would go against the SPIR-V spec.
// See https://github.com/KhronosGroup/SPIRV-Registry/issues/203
if (!spvOptions.debugInfoVulkan)
return;
// Push member functions to DebugTypeComposite Members operand.
for (auto *subDecl : decl->decls()) {
if (const auto *methodDecl = dyn_cast<FunctionDecl>(subDecl)) {
// TODO: if dbgFunction is NULL, it is a member function without
// function calls. We have to generate its type and insert it to
// members.
if (auto *dbgFunction = spvContext.getDebugFunctionForDecl(methodDecl)) {
dbgFunction->setParent(debugTypeComposite);
debugTypeComposite->appendMember(dbgFunction);
}
}
}
}
SpirvDebugTypeTemplate *DebugTypeVisitor::lowerDebugTypeTemplate(
const ClassTemplateSpecializationDecl *templateDecl,
SpirvDebugTypeComposite *debugTypeComposite) {
// Reuse already lowered DebugTypeTemplate.
auto *debugTypeTemplate = spvContext.getDebugTypeTemplate(templateDecl);
if (debugTypeTemplate != nullptr)
return debugTypeTemplate;
llvm::SmallVector<SpirvDebugTypeTemplateParameter *, 2> tempTypeParams;
const auto &argList = templateDecl->getTemplateArgs();
for (unsigned i = 0; i < argList.size(); ++i) {
// Reuse already lowered DebugTypeTemplateParameter.
auto *debugTypeTemplateParam =
spvContext.getDebugTypeTemplateParameter(&argList[i]);
if (debugTypeTemplateParam != nullptr) {
tempTypeParams.push_back(debugTypeTemplateParam);
continue;
}
// TODO: Handle other kinds e.g., value, template template type.
if (argList[i].getKind() != clang::TemplateArgument::ArgKind::Type)
continue;
// Lower DebugTypeTemplateParameter.
const auto *spvType = spvTypeVisitor.lowerType(
argList[i].getAsType(), currentDebugInstructionLayoutRule, llvm::None,
debugTypeComposite->getSourceLocation());
debugTypeTemplateParam = spvContext.createDebugTypeTemplateParameter(
&argList[i], "TemplateParam", lowerToDebugType(spvType),
getDebugInfoNone(), debugTypeComposite->getSource(),
debugTypeComposite->getLine(), debugTypeComposite->getColumn());
tempTypeParams.push_back(debugTypeTemplateParam);
setDefaultDebugInfo(debugTypeTemplateParam);
}
debugTypeTemplate = spvContext.createDebugTypeTemplate(
templateDecl, debugTypeComposite, tempTypeParams);
setDefaultDebugInfo(debugTypeTemplate);
return debugTypeTemplate;
}
SpirvDebugType *
DebugTypeVisitor::lowerToDebugTypeComposite(const SpirvType *type) {
const auto *decl = spvContext.getStructDeclForSpirvType(type);
assert(decl != nullptr && "Lowering DebugTypeComposite needs DeclContext");
uint32_t tag = 1u;
if (const auto *recordDecl = dyn_cast<RecordDecl>(decl)) {
if (recordDecl->isStruct())
tag = 1;
else if (recordDecl->isClass())
tag = 0;
else if (recordDecl->isUnion())
tag = 2;
else
assert(!"DebugTypeComposite must be a struct, class, or union.");
}
SourceLocation loc = {};
if (const auto *declDecl = dyn_cast<Decl>(decl))
loc = declDecl->getLocation();
auto *debugTypeComposite = createDebugTypeComposite(type, loc, tag);
setDefaultDebugInfo(debugTypeComposite);
if (const auto *templateDecl =
dyn_cast<ClassTemplateSpecializationDecl>(decl)) {
// The size of an opaque type must be DebugInfoNone and its name must
// start with "@".
debugTypeComposite->markAsOpaqueType(getDebugInfoNone());
return lowerDebugTypeTemplate(templateDecl, debugTypeComposite);
} else {
// If SpirvType is StructType, it is a normal struct/class. Otherwise,
// it must be an image or a sampler type that is an opaque type.
if (const StructType *structType = dyn_cast<StructType>(type))
lowerDebugTypeMembers(debugTypeComposite, structType, decl);
else
debugTypeComposite->markAsOpaqueType(getDebugInfoNone());
return debugTypeComposite;
}
}
SpirvDebugType *DebugTypeVisitor::lowerToDebugType(const SpirvType *spirvType) {
SpirvDebugType *debugType = nullptr;
switch (spirvType->getKind()) {
case SpirvType::TK_Bool: {
llvm::StringRef name = "bool";
// TODO: Should we use 1 bit for booleans or 32 bits?
uint32_t size = 32;
// TODO: Use enums rather than uint32_t.
uint32_t encoding = 2u;
SpirvConstant *sizeInstruction = spvBuilder.getConstantInt(
astContext.UnsignedIntTy, llvm::APInt(32, size));
sizeInstruction->setResultType(spvContext.getUIntType(32));
debugType = spvContext.getDebugTypeBasic(spirvType, name, sizeInstruction,
encoding);
break;
}
case SpirvType::TK_Integer: {
auto *intType = dyn_cast<IntegerType>(spirvType);
const uint32_t size = intType->getBitwidth();
const bool isSigned = intType->isSignedInt();
SpirvConstant *sizeInstruction = spvBuilder.getConstantInt(
astContext.UnsignedIntTy, llvm::APInt(32, size));
sizeInstruction->setResultType(spvContext.getUIntType(32));
// TODO: Use enums rather than uint32_t.
uint32_t encoding = isSigned ? 4u : 6u;
std::string debugName = "";
if (size == 32) {
debugName = isSigned ? "int" : "uint";
} else {
std::ostringstream stream;
stream << (isSigned ? "int" : "uint") << size << "_t";
debugName = stream.str();
}
debugType = spvContext.getDebugTypeBasic(spirvType, debugName,
sizeInstruction, encoding);
break;
}
case SpirvType::TK_Float: {
auto *floatType = dyn_cast<FloatType>(spirvType);
const uint32_t size = floatType->getBitwidth();
SpirvConstant *sizeInstruction = spvBuilder.getConstantInt(
astContext.UnsignedIntTy, llvm::APInt(32, size));
sizeInstruction->setResultType(spvContext.getUIntType(32));
// TODO: Use enums rather than uint32_t.
uint32_t encoding = 3u;
std::string debugName = "";
if (size == 32) {
debugName = "float";
} else {
std::ostringstream stream;
stream << "float" << size << "_t";
debugName = stream.str();
}
debugType = spvContext.getDebugTypeBasic(spirvType, debugName,
sizeInstruction, encoding);
break;
}
case SpirvType::TK_Image:
case SpirvType::TK_Sampler:
case SpirvType::TK_Struct: {
debugType = lowerToDebugTypeComposite(spirvType);
break;
}
// TODO: Add DebugTypeComposite for class and union.
// TODO: Add DebugTypeEnum.
case SpirvType::TK_Array: {
auto *arrType = dyn_cast<ArrayType>(spirvType);
SpirvDebugInstruction *elemDebugType =
lowerToDebugType(arrType->getElementType());
llvm::SmallVector<uint32_t, 4> counts;
if (auto *dbgArrType = dyn_cast<SpirvDebugTypeArray>(elemDebugType)) {
counts.insert(counts.end(), dbgArrType->getElementCount().begin(),
dbgArrType->getElementCount().end());
elemDebugType = dbgArrType->getElementType();
}
counts.push_back(arrType->getElementCount());
debugType = spvContext.getDebugTypeArray(spirvType, elemDebugType, counts);
break;
}
case SpirvType::TK_RuntimeArray: {
auto *arrType = dyn_cast<RuntimeArrayType>(spirvType);
SpirvDebugInstruction *elemDebugType =
lowerToDebugType(arrType->getElementType());
llvm::SmallVector<uint32_t, 4> counts;
counts.push_back(0u);
debugType = spvContext.getDebugTypeArray(spirvType, elemDebugType, counts);
break;
}
case SpirvType::TK_Vector: {
auto *vecType = dyn_cast<VectorType>(spirvType);
SpirvDebugInstruction *elemDebugType =
lowerToDebugType(vecType->getElementType());
debugType = spvContext.getDebugTypeVector(spirvType, elemDebugType,
vecType->getElementCount());
break;
}
case SpirvType::TK_Matrix: {
auto *matType = dyn_cast<MatrixType>(spirvType);
if (spvOptions.debugInfoVulkan) {
SpirvDebugInstruction *vecDebugType =
lowerToDebugType(matType->getVecType());
debugType = spvContext.getDebugTypeMatrix(spirvType, vecDebugType,
matType->numCols());
} else {
SpirvDebugInstruction *elemDebugType =
lowerToDebugType(matType->getElementType());
debugType = spvContext.getDebugTypeArray(
spirvType, elemDebugType, {matType->numRows(), matType->numCols()});
}
break;
}
case SpirvType::TK_Pointer: {
debugType = lowerToDebugType(
dyn_cast<SpirvPointerType>(spirvType)->getPointeeType());
break;
}
case SpirvType::TK_Function: {
auto *fnType = dyn_cast<FunctionType>(spirvType);
// Special case: There is no DebugType for void. So if the function return
// type is void, we set it to nullptr.
SpirvDebugType *returnType = nullptr;
if (!isa<VoidType>(fnType->getReturnType())) {
auto *loweredRetTy = lowerToDebugType(fnType->getReturnType());
returnType = dyn_cast<SpirvDebugType>(loweredRetTy);
assert(returnType && "Function return type info must be SpirvDebugType");
}
llvm::SmallVector<SpirvDebugType *, 4> params;
for (const auto *paramType : fnType->getParamTypes()) {
params.push_back(dyn_cast<SpirvDebugType>(lowerToDebugType(paramType)));
}
// TODO: Add mechanism to properly calculate the flags.
// The info needed probably resides in clang::FunctionDecl.
// This info can either be stored in the SpirvFunction class. Or,
// alternatively the info can be stored in the SpirvContext.
const uint32_t flags = 3u;
debugType =
spvContext.getDebugTypeFunction(spirvType, flags, returnType, params);
break;
}
case SpirvType::TK_AccelerationStructureNV: {
debugType = lowerToDebugTypeComposite(spirvType);
break;
}
}
if (!debugType) {
emitError("Fail to lower SpirvType %0 to a debug type")
<< spirvType->getName();
return nullptr;
}
setDefaultDebugInfo(debugType);
return debugType;
}
bool DebugTypeVisitor::visitInstruction(SpirvInstruction *instr) {
if (auto *debugInstr = dyn_cast<SpirvDebugInstruction>(instr)) {
setDefaultDebugInfo(debugInstr);
// The following instructions are the only debug instructions that contain a
// debug type:
// DebugGlobalVariable
// DebugLocalVariable
// DebugFunction
if (isa<SpirvDebugGlobalVariable>(debugInstr) ||
isa<SpirvDebugLocalVariable>(debugInstr)) {
currentDebugInstructionLayoutRule = debugInstr->getLayoutRule();
const SpirvType *spirvType = debugInstr->getDebugSpirvType();
assert(spirvType != nullptr);
SpirvDebugInstruction *debugType = lowerToDebugType(spirvType);
debugInstr->setDebugType(debugType);
}
if (auto *debugFunction = dyn_cast<SpirvDebugFunction>(debugInstr)) {
currentDebugInstructionLayoutRule = SpirvLayoutRule::Void;
const SpirvType *spirvType =
debugFunction->getSpirvFunction()->getFunctionType();
if (spirvType) {
SpirvDebugInstruction *debugType = lowerToDebugType(spirvType);
debugInstr->setDebugType(debugType);
}
}
}
return true;
}
bool DebugTypeVisitor::visit(SpirvModule *module, Phase phase) {
if (phase == Phase::Done)
spvContext.moveDebugTypesToModule(module);
return true;
}
} // namespace spirv
} // namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/SpirvFunction.cpp | //===--- SpirvFunction.cpp - SPIR-V Function Implementation ------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/SPIRV/SpirvFunction.h"
#include "BlockReadableOrder.h"
#include "clang/SPIRV/SpirvVisitor.h"
namespace clang {
namespace spirv {
SpirvFunction::SpirvFunction(QualType returnType, SourceLocation loc,
llvm::StringRef name, bool isPrecise,
bool isNoInline)
: functionId(0), astReturnType(returnType), returnType(nullptr),
fnType(nullptr), relaxedPrecision(false), precise(isPrecise),
noInline(isNoInline), containsAlias(false), rvalue(false),
functionLoc(loc), functionName(name), isWrapperOfEntry(false),
debugScope(nullptr) {}
SpirvFunction::~SpirvFunction() {
for (auto *param : parameters)
param->releaseMemory();
for (auto *var : variables)
var->releaseMemory();
for (auto *bb : basicBlocks)
bb->~SpirvBasicBlock();
if (debugScope)
debugScope->releaseMemory();
for (auto *dd : debugDeclares)
dd->releaseMemory();
}
bool SpirvFunction::invokeVisitor(Visitor *visitor, bool reverseOrder) {
if (!visitor->visit(this, Visitor::Phase::Init))
return false;
const bool debugInfoVulkan = visitor->getCodeGenOptions().debugInfoVulkan;
  // When emitting NonSemantic.Shader.DebugInfo.100, the DebugScope and
  // DebugDeclares must be emitted in the first basic block, whereas for
  // OpenCL.DebugInfo.100 we can emit them here.
SpirvDebugScope *functionScope = nullptr;
llvm::ArrayRef<SpirvDebugDeclare *> functionDebugDeclares;
if (debugInfoVulkan) {
functionScope = debugScope;
for (auto *param : parameters) {
visitor->visit(param);
}
functionDebugDeclares = debugDeclares;
} else {
if (debugScope && !visitor->visit(debugScope))
return false;
for (auto *param : parameters) {
visitor->visit(param);
}
for (auto *i : debugDeclares)
visitor->visit(i);
}
// Collect basic blocks in a human-readable order that satisfies SPIR-V
// validation rules.
std::vector<SpirvBasicBlock *> orderedBlocks;
if (!basicBlocks.empty()) {
BlockReadableOrderVisitor([&orderedBlocks](SpirvBasicBlock *block) {
orderedBlocks.push_back(block);
}).visit(basicBlocks.front());
}
SpirvBasicBlock *firstBB = orderedBlocks.empty() ? nullptr : orderedBlocks[0];
if (reverseOrder)
std::reverse(orderedBlocks.begin(), orderedBlocks.end());
for (auto *bb : orderedBlocks) {
// The first basic block of the function should first visit the function
// variables.
if (bb == firstBB) {
if (!bb->invokeVisitor(visitor, variables, functionScope,
functionDebugDeclares, reverseOrder))
return false;
}
// The rest of the basic blocks in the function do not need to visit
// function variables.
else {
if (!bb->invokeVisitor(visitor, {}, nullptr, {}, reverseOrder))
return false;
}
}
if (!visitor->visit(this, Visitor::Phase::Done))
return false;
return true;
}
void SpirvFunction::addParameter(SpirvFunctionParameter *param) {
assert(param && "cannot add null function parameter");
parameters.push_back(param);
}
void SpirvFunction::addVariable(SpirvVariable *var) {
assert(var && "cannot add null variable to function");
variables.push_back(var);
}
void SpirvFunction::addBasicBlock(SpirvBasicBlock *bb) {
assert(bb && "cannot add null basic block to function");
basicBlocks.push_back(bb);
}
} // end namespace spirv
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/SortDebugInfoVisitor.cpp | //===--- SortDebugInfoVisitor.cpp - Valid order debug instrs -----*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "SortDebugInfoVisitor.h"
namespace clang {
namespace spirv {
void SortDebugInfoVisitor::whileEachOperandOfDebugInstruction(
SpirvDebugInstruction *di,
llvm::function_ref<bool(SpirvDebugInstruction *)> visitor) {
if (di == nullptr)
return;
if (di->getDebugType() != nullptr) {
if (!visitor(di->getDebugType()))
return;
}
if (di->getParentScope() != nullptr) {
if (!visitor(di->getParentScope()))
return;
}
switch (di->getKind()) {
case SpirvInstruction::IK_DebugCompilationUnit: {
SpirvDebugCompilationUnit *inst = dyn_cast<SpirvDebugCompilationUnit>(di);
assert(inst != nullptr);
if (!visitor(inst->getDebugSource()))
break;
} break;
case SpirvInstruction::IK_DebugFunctionDecl: {
SpirvDebugFunctionDeclaration *inst =
dyn_cast<SpirvDebugFunctionDeclaration>(di);
assert(inst != nullptr);
if (!visitor(inst->getSource()))
break;
} break;
case SpirvInstruction::IK_DebugFunction: {
SpirvDebugFunction *inst = dyn_cast<SpirvDebugFunction>(di);
assert(inst != nullptr);
if (!visitor(inst->getSource()))
break;
if (!visitor(inst->getDebugInfoNone()))
break;
} break;
case SpirvInstruction::IK_DebugFunctionDef: {
SpirvDebugFunctionDefinition *inst =
dyn_cast<SpirvDebugFunctionDefinition>(di);
assert(inst != nullptr);
if (!visitor(inst->getDebugFunction()))
break;
} break;
case SpirvInstruction::IK_DebugEntryPoint: {
SpirvDebugEntryPoint *inst = dyn_cast<SpirvDebugEntryPoint>(di);
assert(inst != nullptr);
if (!visitor(inst->getEntryPoint()))
break;
if (!visitor(inst->getCompilationUnit()))
break;
} break;
case SpirvInstruction::IK_DebugLocalVariable: {
SpirvDebugLocalVariable *inst = dyn_cast<SpirvDebugLocalVariable>(di);
assert(inst != nullptr);
if (!visitor(inst->getSource()))
break;
} break;
case SpirvInstruction::IK_DebugGlobalVariable: {
SpirvDebugGlobalVariable *inst = dyn_cast<SpirvDebugGlobalVariable>(di);
assert(inst != nullptr);
if (!visitor(inst->getSource()))
break;
} break;
case SpirvInstruction::IK_DebugExpression: {
SpirvDebugExpression *inst = dyn_cast<SpirvDebugExpression>(di);
assert(inst != nullptr);
for (auto *op : inst->getOperations())
if (!visitor(op))
break;
} break;
case SpirvInstruction::IK_DebugLexicalBlock: {
SpirvDebugLexicalBlock *inst = dyn_cast<SpirvDebugLexicalBlock>(di);
assert(inst != nullptr);
if (!visitor(inst->getSource()))
break;
} break;
case SpirvInstruction::IK_DebugTypeArray: {
SpirvDebugTypeArray *inst = dyn_cast<SpirvDebugTypeArray>(di);
assert(inst != nullptr);
if (!visitor(inst->getElementType()))
break;
} break;
case SpirvInstruction::IK_DebugTypeVector: {
SpirvDebugTypeVector *inst = dyn_cast<SpirvDebugTypeVector>(di);
assert(inst != nullptr);
if (!visitor(inst->getElementType()))
break;
} break;
case SpirvInstruction::IK_DebugTypeMatrix: {
SpirvDebugTypeMatrix *inst = cast<SpirvDebugTypeMatrix>(di);
assert(inst != nullptr);
visitor(inst->getVectorType());
} break;
case SpirvInstruction::IK_DebugTypeFunction: {
SpirvDebugTypeFunction *inst = dyn_cast<SpirvDebugTypeFunction>(di);
assert(inst != nullptr);
if (!visitor(inst->getReturnType()))
break;
for (auto *param : inst->getParamTypes())
if (!visitor(param))
break;
} break;
case SpirvInstruction::IK_DebugTypeComposite: {
SpirvDebugTypeComposite *inst = dyn_cast<SpirvDebugTypeComposite>(di);
assert(inst != nullptr);
    // A DebugTypeTemplate used for an HLSL resource has to reference its
    // DebugTypeComposite, but the DebugTypeComposite does not reference the
    // DebugTypeTemplate back. DO NOT visit DebugTypeTemplate here.
if (!visitor(inst->getSource()))
break;
if (!visitor(inst->getDebugInfoNone()))
break;
    // Note that in OpenCL.DebugInfo.100 DebugTypeComposite always has forward
    // references to members. Therefore, the edge direction in the DAG must be
    // from DebugTypeMember to DebugTypeComposite. DO NOT visit members here.
//
// By comparison, NonSemantic.Shader.DebugInfo.100 bans forward references,
// leaving only the reference from composite to members and not the
// back-reference from member to composite parent. That means we DO want to
// visit members here.
if (spvOptions.debugInfoVulkan) {
for (auto *member : inst->getMembers())
if (!visitor(member))
break;
}
} break;
case SpirvInstruction::IK_DebugTypeMember: {
SpirvDebugTypeMember *inst = dyn_cast<SpirvDebugTypeMember>(di);
assert(inst != nullptr);
if (!visitor(inst->getDebugType()))
break;
if (!visitor(inst->getSource()))
break;
} break;
case SpirvInstruction::IK_DebugTypeTemplate: {
SpirvDebugTypeTemplate *inst = dyn_cast<SpirvDebugTypeTemplate>(di);
assert(inst != nullptr);
if (!visitor(inst->getTarget()))
break;
for (auto *param : inst->getParams())
if (!visitor(param))
break;
} break;
case SpirvInstruction::IK_DebugTypeTemplateParameter: {
SpirvDebugTypeTemplateParameter *inst =
dyn_cast<SpirvDebugTypeTemplateParameter>(di);
assert(inst != nullptr);
if (!visitor(inst->getActualType()))
break;
    // The Value operand of DebugTypeTemplateParameter must be DebugInfoNone
    // when the parameter is used for a type rather than for an integer value.
if (auto *value = dyn_cast<SpirvDebugInstruction>(inst->getValue())) {
if (!visitor(value))
break;
}
if (!visitor(inst->getSource()))
break;
} break;
case SpirvInstruction::IK_DebugInfoNone:
case SpirvInstruction::IK_DebugSource:
case SpirvInstruction::IK_DebugOperation:
case SpirvInstruction::IK_DebugTypeBasic:
break;
default:
// DebugDeclare and DebugScope must be placed within a function.
assert(false && "unsupported debug instruction");
break;
}
}
bool SortDebugInfoVisitor::visit(SpirvModule *mod, Phase phase) {
if (phase == Phase::Done)
return true;
auto &debugInstructions = mod->getDebugInfo();
// Keep the number of unique debug instructions to verify that it is not
// changed at the end of this visitor.
llvm::SmallSet<SpirvDebugInstruction *, 32> uniqueDebugInstructions;
uniqueDebugInstructions.insert(debugInstructions.begin(),
debugInstructions.end());
auto numberOfDebugInstrs = uniqueDebugInstructions.size();
(void)numberOfDebugInstrs;
// Collect nodes without predecessor.
llvm::SmallSet<SpirvDebugInstruction *, 32> visited;
for (auto *di : debugInstructions) {
whileEachOperandOfDebugInstruction(
di, [&visited](SpirvDebugInstruction *operand) {
if (operand != nullptr)
visited.insert(operand);
return true;
});
}
llvm::SmallVector<SpirvDebugInstruction *, 32> stack;
for (auto *di : debugInstructions) {
if (visited.count(di) == 0)
stack.push_back(di);
}
  // Sort debug instructions in a post order. We put successors at the front
  // of `debugInstructions`. For example, `DebugInfoNone` does not have any
  // operand, which means it does not have any successors. We have to place
  // it earlier than the instructions using it.
debugInstructions.clear();
visited.clear();
while (!stack.empty()) {
auto *di = stack.back();
visited.insert(di);
whileEachOperandOfDebugInstruction(
di, [&visited, &stack](SpirvDebugInstruction *operand) {
if (operand != nullptr && visited.count(operand) == 0) {
stack.push_back(operand);
return false;
}
return true;
});
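    // If the callback above pushed an unvisited operand, the top of the stack
    // is no longer `di`; process that operand first. Only once every operand
    // of `di` has been visited do we emit `di` and pop it, which yields the
    // post order.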
if (stack.back() == di) {
debugInstructions.push_back(di);
stack.pop_back();
}
}
// The sort result must have the same number of debug instructions.
assert(numberOfDebugInstrs == debugInstructions.size());
return true;
}
} // end namespace spirv
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/SignaturePackingUtil.h | //===--- SignaturePackingUtil.h - Utility functions for signature packing -===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_SPIRV_SIGNATUREPACKINGUTIL_H
#define LLVM_CLANG_LIB_SPIRV_SIGNATUREPACKINGUTIL_H
#include <vector>
#include "clang/SPIRV/SpirvBuilder.h"
#include "llvm/ADT/STLExtras.h"
#include "StageVar.h"
namespace clang {
namespace spirv {
/// \brief Packs signature by assigning locations and components to stage
/// variables |vars|. |nextLocs| is a function that returns the next available
/// location for the given number of required locations. |spvBuilder| is used to
/// create OpDecorate instructions. |forInput| is true when |vars| are input
/// stage variables.
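///
/// A minimal usage sketch (hypothetical caller; `inputVars` and `spvBuilder`
/// are assumed to already exist). The lambda hands out consecutive locations:
///
///   uint32_t nextLoc = 0;
///   packSignature(spvBuilder, inputVars,
///                 [&nextLoc](uint32_t requiredLocs) {
///                   uint32_t loc = nextLoc;
///                   nextLoc += requiredLocs;
///                   return loc;
///                 },
///                 /*forInput=*/true);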
bool packSignature(SpirvBuilder &spvBuilder,
const std::vector<const StageVar *> &vars,
llvm::function_ref<uint32_t(uint32_t)> nextLocs,
bool forInput);
} // end namespace spirv
} // end namespace clang
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/CapabilityVisitor.h | //===--- CapabilityVisitor.h - Capability Visitor ----------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_SPIRV_CAPABILITYVISITOR_H
#define LLVM_CLANG_LIB_SPIRV_CAPABILITYVISITOR_H
#include "clang/SPIRV/FeatureManager.h"
#include "clang/SPIRV/SpirvContext.h"
#include "clang/SPIRV/SpirvVisitor.h"
namespace clang {
namespace spirv {
class SpirvBuilder;
class CapabilityVisitor : public Visitor {
public:
CapabilityVisitor(ASTContext &astCtx, SpirvContext &spvCtx,
const SpirvCodeGenOptions &opts, SpirvBuilder &builder,
FeatureManager &featureMgr)
: Visitor(opts, spvCtx), spvBuilder(builder),
shaderModel(spv::ExecutionModel::Max), featureManager(featureMgr) {}
bool visit(SpirvModule *, Phase) override;
bool visit(SpirvDecoration *decor) override;
bool visit(SpirvEntryPoint *) override;
bool visit(SpirvExecutionMode *) override;
bool visit(SpirvImageQuery *) override;
bool visit(SpirvImageOp *) override;
bool visit(SpirvImageSparseTexelsResident *) override;
bool visit(SpirvExtInstImport *) override;
bool visit(SpirvAtomic *) override;
bool visit(SpirvDemoteToHelperInvocation *) override;
bool visit(SpirvIsHelperInvocationEXT *) override;
bool visit(SpirvReadClock *) override;
using Visitor::visit;
/// The "sink" visit function for all instructions.
///
  /// By default, all other visit functions redirect to this visit function.
  /// So if you want to handle all instructions, regardless of their
  /// polymorphism, override this visit function.
bool visitInstruction(SpirvInstruction *instr) override;
private:
/// Adds necessary capabilities for using the given type.
  /// The caller may also provide the storage class for variable types, because
/// in the case of variable types, the storage class may affect the capability
/// that is used.
void addCapabilityForType(const SpirvType *, SourceLocation loc,
spv::StorageClass sc);
  /// Checks that the given extension is a valid extension for the target
  /// environment (e.g. Vulkan 1.0) and, if so, utilizes the SpirvBuilder to
  /// add the given extension to the SPIR-V module in memory.
void addExtension(Extension ext, llvm::StringRef target, SourceLocation loc);
/// Checks that the given extension is enabled based on command line arguments
/// before calling addExtension and addCapability.
/// Returns `true` if the extension was enabled, `false` otherwise.
bool addExtensionAndCapabilitiesIfEnabled(
Extension ext, llvm::ArrayRef<spv::Capability> capabilities);
  /// Checks that the given capability is a valid capability and, if so,
  /// utilizes the SpirvBuilder to add the given capability to the SPIR-V
  /// module in memory.
void addCapability(spv::Capability, SourceLocation loc = {});
/// Returns the capability required to non-uniformly index into the given
/// type.
spv::Capability getNonUniformCapability(const SpirvType *);
/// Returns whether the shader model is one of the ray tracing execution
/// models.
bool IsShaderModelForRayTracing();
  /// Adds the VulkanMemoryModel capability if the decoration needs Volatile
  /// semantics for OpLoad instructions. For Vulkan 1.3 or above, we can simply
  /// add the Volatile decoration for the variable. Therefore, in that case, we
  /// do not need the VulkanMemoryModel capability.
void AddVulkanMemoryModelForVolatile(SpirvDecoration *decor,
SourceLocation loc);
private:
SpirvBuilder &spvBuilder; ///< SPIR-V builder
spv::ExecutionModel shaderModel; ///< Execution model
FeatureManager featureManager; ///< SPIR-V version/extension manager.
};
} // end namespace spirv
} // end namespace clang
#endif // LLVM_CLANG_LIB_SPIRV_CAPABILITYVISITOR_H
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/AlignmentSizeCalculator.cpp | //===--- AlignmentSizeCalculator.cpp -- Alignment And Size Calc --*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "AlignmentSizeCalculator.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclTemplate.h"
namespace {
/// The alignment for 4-component float vectors.
constexpr uint32_t kStd140Vec4Alignment = 16u;
/// Rounds the given value up to the given power of 2.
inline uint32_t roundToPow2(uint32_t val, uint32_t pow2) {
assert(pow2 != 0);
return (val + pow2 - 1) & ~(pow2 - 1);
}
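// A couple of worked examples for the helper above: roundToPow2(17, 16)
// yields 32 and roundToPow2(16, 16) stays 16. The bit trick relies on pow2
// being a power of two, so that ~(pow2 - 1) is a mask clearing the low bits:
//   (17 + 15) & ~15 == 32.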
/// Returns true if the given vector type (of the given size) crosses the
/// 4-component vector boundary if placed at the given offset.
bool improperStraddle(clang::QualType type, int size, int offset) {
assert(clang::spirv::isVectorType(type));
return size <= 16 ? offset / 16 != (offset + size - 1) / 16
: offset % 16 != 0;
}
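// For example, a float3 (size 12) placed at offset 8 occupies bytes [8, 20)
// and crosses the 16-byte boundary, so it straddles improperly; at offset 4
// it occupies bytes [4, 16) and is fine. Any vector larger than 16 bytes
// must simply start at a multiple of 16.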
} // end anonymous namespace
namespace clang {
namespace spirv {
void AlignmentSizeCalculator::alignUsingHLSLRelaxedLayout(
QualType fieldType, uint32_t fieldSize, uint32_t fieldAlignment,
uint32_t *currentOffset) const {
QualType vecElemType = {};
const bool fieldIsVecType = isVectorType(fieldType, &vecElemType);
// Adjust according to HLSL relaxed layout rules.
// Aligning vectors as their element types so that we can pack a float
// and a float3 tightly together.
if (fieldIsVecType) {
uint32_t scalarAlignment = 0;
std::tie(scalarAlignment, std::ignore) = getAlignmentAndSize(
vecElemType, SpirvLayoutRule::Void, /*isRowMajor*/ llvm::None, nullptr);
if (scalarAlignment <= 4)
fieldAlignment = scalarAlignment;
}
*currentOffset = roundToPow2(*currentOffset, fieldAlignment);
// Adjust according to HLSL relaxed layout rules.
// Bump to 4-component vector alignment if there is a bad straddle
if (fieldIsVecType &&
improperStraddle(fieldType, fieldSize, *currentOffset)) {
fieldAlignment = kStd140Vec4Alignment;
*currentOffset = roundToPow2(*currentOffset, fieldAlignment);
}
}
std::pair<uint32_t, uint32_t> AlignmentSizeCalculator::getAlignmentAndSize(
QualType type, const RecordType *structType, SpirvLayoutRule rule,
llvm::Optional<bool> isRowMajor, uint32_t *stride) const {
uint32_t maxAlignment = 1;
uint32_t structSize = 0;
// If this struct is derived from some other structs, place an implicit
// field at the very beginning for the base struct.
if (const auto *cxxDecl = dyn_cast<CXXRecordDecl>(structType->getDecl())) {
for (const auto &base : cxxDecl->bases()) {
uint32_t memberAlignment = 0, memberSize = 0;
std::tie(memberAlignment, memberSize) =
getAlignmentAndSize(base.getType(), rule, isRowMajor, stride);
if (rule == SpirvLayoutRule::RelaxedGLSLStd140 ||
rule == SpirvLayoutRule::RelaxedGLSLStd430 ||
rule == SpirvLayoutRule::FxcCTBuffer) {
alignUsingHLSLRelaxedLayout(base.getType(), memberSize, memberAlignment,
&structSize);
} else {
structSize = roundToPow2(structSize, memberAlignment);
}
// The base alignment of the structure is N, where N is the largest
// base alignment value of any of its members...
maxAlignment = std::max(maxAlignment, memberAlignment);
structSize += memberSize;
}
}
const FieldDecl *lastField = nullptr;
uint32_t nextAvailableBitOffset = 0;
for (const FieldDecl *field : structType->getDecl()->fields()) {
uint32_t memberAlignment = 0, memberSize = 0;
std::tie(memberAlignment, memberSize) =
getAlignmentAndSize(field->getType(), rule, isRowMajor, stride);
uint32_t memberBitOffset = 0;
do {
if (!lastField)
break;
if (!field->isBitField() || !lastField->isBitField())
break;
// Not the same type as the previous bitfield. Ignoring.
if (lastField->getType() != field->getType())
break;
// Not enough room left to fit this bitfield. Starting a new slot.
if (nextAvailableBitOffset + field->getBitWidthValue(astContext) >
memberSize * 8)
break;
memberBitOffset = nextAvailableBitOffset;
memberSize = 0;
} while (0);
if (memberSize != 0) {
if (rule == SpirvLayoutRule::RelaxedGLSLStd140 ||
rule == SpirvLayoutRule::RelaxedGLSLStd430 ||
rule == SpirvLayoutRule::FxcCTBuffer) {
alignUsingHLSLRelaxedLayout(field->getType(), memberSize,
memberAlignment, &structSize);
} else {
structSize = roundToPow2(structSize, memberAlignment);
}
}
    // Reset the current offset to the one specified in the source code if one
    // exists. We issue a warning instead of an error if the offset is not
    // correctly aligned. This allows users to disable validation and use the
    // offset specified in the source code if they are sure that is what they
    // want.
if (const auto *offsetAttr = field->getAttr<VKOffsetAttr>()) {
structSize = offsetAttr->getOffset();
if (structSize % memberAlignment != 0) {
emitWarning(
"The offset provided in the attribute should be %0-byte aligned.",
field->getLocation())
<< memberAlignment;
}
}
// The base alignment of the structure is N, where N is the largest
// base alignment value of any of its members...
maxAlignment = std::max(maxAlignment, memberAlignment);
structSize += memberSize;
lastField = field;
nextAvailableBitOffset =
field->isBitField()
? memberBitOffset + field->getBitWidthValue(astContext)
: 0;
}
if (rule == SpirvLayoutRule::Scalar) {
// A structure has a scalar alignment equal to the largest scalar
// alignment of any of its members in VK_EXT_scalar_block_layout.
return {maxAlignment, structSize};
}
if (rule == SpirvLayoutRule::GLSLStd140 ||
rule == SpirvLayoutRule::RelaxedGLSLStd140 ||
rule == SpirvLayoutRule::FxcCTBuffer) {
// ... and rounded up to the base alignment of a vec4.
maxAlignment = roundToPow2(maxAlignment, kStd140Vec4Alignment);
}
if (rule != SpirvLayoutRule::FxcCTBuffer) {
// The base offset of the member following the sub-structure is rounded
// up to the next multiple of the base alignment of the structure.
structSize = roundToPow2(structSize, maxAlignment);
}
return {maxAlignment, structSize};
}
std::pair<uint32_t, uint32_t> AlignmentSizeCalculator::getAlignmentAndSize(
QualType type, SpirvLayoutRule rule, llvm::Optional<bool> isRowMajor,
uint32_t *stride) const {
// std140 layout rules:
// 1. If the member is a scalar consuming N basic machine units, the base
// alignment is N.
//
// 2. If the member is a two- or four-component vector with components
// consuming N basic machine units, the base alignment is 2N or 4N,
// respectively.
//
// 3. If the member is a three-component vector with components consuming N
// basic machine units, the base alignment is 4N.
//
// 4. If the member is an array of scalars or vectors, the base alignment and
// array stride are set to match the base alignment of a single array
// element, according to rules (1), (2), and (3), and rounded up to the
// base alignment of a vec4. The array may have padding at the end; the
// base offset of the member following the array is rounded up to the next
// multiple of the base alignment.
//
// 5. If the member is a column-major matrix with C columns and R rows, the
// matrix is stored identically to an array of C column vectors with R
// components each, according to rule (4).
//
// 6. If the member is an array of S column-major matrices with C columns and
// R rows, the matrix is stored identically to a row of S X C column
// vectors with R components each, according to rule (4).
//
// 7. If the member is a row-major matrix with C columns and R rows, the
// matrix is stored identically to an array of R row vectors with C
// components each, according to rule (4).
//
// 8. If the member is an array of S row-major matrices with C columns and R
// rows, the matrix is stored identically to a row of S X R row vectors
// with C components each, according to rule (4).
//
// 9. If the member is a structure, the base alignment of the structure is N,
// where N is the largest base alignment value of any of its members, and
// rounded up to the base alignment of a vec4. The individual members of
// this substructure are then assigned offsets by applying this set of
// rules recursively, where the base offset of the first member of the
// sub-structure is equal to the aligned offset of the structure. The
// structure may have padding at the end; the base offset of the member
// following the sub-structure is rounded up to the next multiple of the
// base alignment of the structure.
//
// 10. If the member is an array of S structures, the S elements of the array
// are laid out in order, according to rule (9).
//
// This method supports multiple layout rules, all of them modifying the
// std140 rules listed above:
//
// std430:
// - Array base alignment and stride does not need to be rounded up to a
// multiple of 16.
// - Struct base alignment does not need to be rounded up to a multiple of 16.
//
// Relaxed std140/std430:
// - Vector base alignment is set as its element type's base alignment.
//
// FxcCTBuffer:
// - Vector base alignment is set as its element type's base alignment.
// - Arrays/structs do not need to have padding at the end; arrays/structs do
// not affect the base offset of the member following them.
// - For typeNxM matrix, if M > 1,
  //   - It must be aligned to 16 bytes.
// - Its size must be (16 * (M - 1)) + N * sizeof(type).
// - We have the same rule for column_major typeNxM and row_major typeMxN.
//
// FxcSBuffer:
// - Vector/matrix/array base alignment is set as its element type's base
// alignment.
// - Struct base alignment is set as the maximum of its component's
// alignment.
// - Size of vector/matrix/array is set as the number of its elements times
// the size of its element.
// - Size of struct must be aligned to its alignment.
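  //
  // As a worked example of the relaxed rules versus plain std140, consider
  // struct S { float a; float3 b; }. Under std140, `b` has base alignment 16,
  // so it starts at offset 16 and S occupies 32 bytes. Under relaxed std140,
  // `b` is aligned like its element type (4 bytes); it starts at offset 4,
  // occupies bytes [4, 16) without improperly straddling a 16-byte boundary,
  // and S packs into 16 bytes.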
const auto desugaredType = desugarType(type, &isRowMajor);
if (desugaredType != type) {
return getAlignmentAndSize(desugaredType, rule, isRowMajor, stride);
}
const auto *recordType = type->getAs<RecordType>();
if (recordType != nullptr) {
const llvm::StringRef name = recordType->getDecl()->getName();
if (isTypeInVkNamespace(recordType) && name == "SpirvType") {
const ClassTemplateSpecializationDecl *templateDecl =
cast<ClassTemplateSpecializationDecl>(recordType->getDecl());
const uint64_t size =
templateDecl->getTemplateArgs()[1].getAsIntegral().getZExtValue();
const uint64_t alignment =
templateDecl->getTemplateArgs()[2].getAsIntegral().getZExtValue();
return {alignment, size};
}
}
if (isEnumType(type))
type = astContext.IntTy;
{ // Rule 1
QualType ty = {};
if (isScalarType(type, &ty))
if (const auto *builtinType = ty->getAs<BuiltinType>())
switch (builtinType->getKind()) {
case BuiltinType::Bool:
case BuiltinType::Int:
case BuiltinType::UInt:
case BuiltinType::Float:
return {4, 4};
case BuiltinType::Double:
case BuiltinType::LongLong:
case BuiltinType::ULongLong:
return {8, 8};
case BuiltinType::Min12Int:
case BuiltinType::Min16Int:
case BuiltinType::Min16UInt:
case BuiltinType::Min16Float:
case BuiltinType::Min10Float: {
if (spvOptions.enable16BitTypes)
return {2, 2};
else
return {4, 4};
}
        // The 'Half' enum always represents 16-bit floats;
        // int16_t and uint16_t map to Short and UShort.
case BuiltinType::Short:
case BuiltinType::UShort:
case BuiltinType::Half:
return {2, 2};
// 'HalfFloat' always represents 32-bit floats.
case BuiltinType::HalfFloat:
return {4, 4};
default:
emitError("alignment and size calculation for type %0 unimplemented")
<< type;
return {0, 0};
}
}
// FxcCTBuffer for typeNxM matrix where M > 1,
  // - It must be aligned to 16 bytes.
// - Its size must be (16 * (M - 1)) + N * sizeof(type).
// - We have the same rule for column_major typeNxM and row_major typeMxN.
if (rule == SpirvLayoutRule::FxcCTBuffer && hlsl::IsHLSLMatType(type)) {
uint32_t rowCount = 0, colCount = 0;
hlsl::GetHLSLMatRowColCount(type, rowCount, colCount);
if (!useRowMajor(isRowMajor, type))
std::swap(rowCount, colCount);
if (colCount > 1) {
auto elemType = hlsl::GetHLSLMatElementType(type);
uint32_t alignment = 0, size = 0;
std::tie(alignment, size) =
getAlignmentAndSize(elemType, rule, isRowMajor, stride);
alignment = roundToPow2(alignment * (rowCount == 3 ? 4 : rowCount),
kStd140Vec4Alignment);
*stride = alignment;
return {alignment, 16 * (colCount - 1) + rowCount * size};
}
}
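  // For example, under FxcCTBuffer a float2x2 is 16-byte aligned and occupies
  // 16 * (2 - 1) + 2 * sizeof(float) = 24 bytes: every vector except the last
  // consumes a full 16-byte register, while the last one only contributes its
  // actual 8 bytes with no trailing padding.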
{ // Rule 2 and 3
QualType elemType = {};
uint32_t elemCount = {};
if (isVectorType(type, &elemType, &elemCount)) {
uint32_t alignment = 0, size = 0;
std::tie(alignment, size) =
getAlignmentAndSize(elemType, rule, isRowMajor, stride);
// Use element alignment for fxc rules and VK_EXT_scalar_block_layout
if (rule != SpirvLayoutRule::FxcCTBuffer &&
rule != SpirvLayoutRule::FxcSBuffer &&
rule != SpirvLayoutRule::Scalar)
alignment = (elemCount == 3 ? 4 : elemCount) * size;
return {alignment, elemCount * size};
}
}
{ // Rule 5 and 7
QualType elemType = {};
uint32_t rowCount = 0, colCount = 0;
if (isMxNMatrix(type, &elemType, &rowCount, &colCount)) {
uint32_t alignment = 0, size = 0;
std::tie(alignment, size) =
getAlignmentAndSize(elemType, rule, isRowMajor, stride);
// Matrices are treated as arrays of vectors:
// The base alignment and array stride are set to match the base alignment
// of a single array element, according to rules 1, 2, and 3, and rounded
// up to the base alignment of a vec4.
bool rowMajor = useRowMajor(isRowMajor, type);
const uint32_t vecStorageSize = rowMajor ? rowCount : colCount;
if (rule == SpirvLayoutRule::FxcSBuffer ||
rule == SpirvLayoutRule::Scalar) {
*stride = vecStorageSize * size;
// Use element alignment for fxc structured buffers and
// VK_EXT_scalar_block_layout
return {alignment, rowCount * colCount * size};
}
alignment *= (vecStorageSize == 3 ? 4 : vecStorageSize);
if (rule == SpirvLayoutRule::GLSLStd140 ||
rule == SpirvLayoutRule::RelaxedGLSLStd140 ||
rule == SpirvLayoutRule::FxcCTBuffer) {
alignment = roundToPow2(alignment, kStd140Vec4Alignment);
}
*stride = alignment;
size = (rowMajor ? colCount : rowCount) * alignment;
return {alignment, size};
}
}
// Rule 9
if (const auto *structType = type->getAs<RecordType>()) {
return getAlignmentAndSize(type, structType, rule, isRowMajor, stride);
}
// Rule 4, 6, 8, and 10
if (const auto *arrayType = astContext.getAsConstantArrayType(type)) {
const auto elemCount = arrayType->getSize().getZExtValue();
uint32_t alignment = 0, size = 0;
std::tie(alignment, size) = getAlignmentAndSize(arrayType->getElementType(),
rule, isRowMajor, stride);
if (rule == SpirvLayoutRule::FxcSBuffer ||
rule == SpirvLayoutRule::Scalar) {
*stride = size;
// Use element alignment for fxc structured buffers and
// VK_EXT_scalar_block_layout
return {alignment, size * elemCount};
}
if (rule == SpirvLayoutRule::GLSLStd140 ||
rule == SpirvLayoutRule::RelaxedGLSLStd140 ||
rule == SpirvLayoutRule::FxcCTBuffer) {
// The base alignment and array stride are set to match the base alignment
// of a single array element, according to rules 1, 2, and 3, and rounded
// up to the base alignment of a vec4.
alignment = roundToPow2(alignment, kStd140Vec4Alignment);
if (size == 0)
size = alignment;
}
if (rule == SpirvLayoutRule::FxcCTBuffer) {
      // In fxc cbuffer/tbuffer packing rules, arrays do not affect the data
      // packing after them. But we still need to make sure padding is inserted
      // internally if necessary.
*stride = roundToPow2(size, alignment);
size += *stride * (elemCount - 1);
} else {
// Need to round size up considering stride for scalar types
size = roundToPow2(size, alignment);
*stride = size; // Use size instead of alignment here for Rule 10
size *= elemCount;
// The base offset of the member following the array is rounded up to the
// next multiple of the base alignment.
size = roundToPow2(size, alignment);
}
return {alignment, size};
}
emitError("alignment and size calculation for type %0 unimplemented") << type;
return {0, 0};
}
} // namespace spirv
} // namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/SortDebugInfoVisitor.h | //===--- SortDebugInfoVisitor.h - Debug instrs in Valid order ----*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_SPIRV_SORTDEBUGINFOVISITOR_H
#define LLVM_CLANG_LIB_SPIRV_SORTDEBUGINFOVISITOR_H
#include "clang/SPIRV/SpirvContext.h"
#include "clang/SPIRV/SpirvInstruction.h"
#include "clang/SPIRV/SpirvModule.h"
#include "clang/SPIRV/SpirvVisitor.h"
namespace clang {
namespace spirv {
class SpirvFunction;
class SpirvBasicBlock;
/// The class responsible for sorting rich DebugInfo instructions in a valid
/// order without any invalid forward references.
///
/// Since NonSemantic.Shader.DebugInfo.100 permits no forward references at
/// all, the result for that instruction set will have none.
class SortDebugInfoVisitor : public Visitor {
public:
SortDebugInfoVisitor(SpirvContext &spvCtx, const SpirvCodeGenOptions &opts)
: Visitor(opts, spvCtx) {}
  // Sorts debug instructions in a post order to remove invalid forward
  // references. Note that the post order guarantees an instruction is emitted
  // only after all of its operands, which is exactly what is needed to lay
  // out the instructions without any invalid forward reference.
bool visit(SpirvModule *, Phase);
// Visiting different SPIR-V constructs.
bool visit(SpirvFunction *, Phase) { return true; }
bool visit(SpirvBasicBlock *, Phase) { return true; }
/// The "sink" visit function for all instructions.
///
  /// By default, all other visit functions redirect to this visit function.
  /// So if you want to handle all instructions, regardless of their
  /// polymorphism, override this visit function.
bool visitInstruction(SpirvInstruction *) { return true; }
using Visitor::visit;
private:
// Invokes visitor for each operand of the debug instruction `di`. If
// `visitor` returns false, it stops and returns.
void whileEachOperandOfDebugInstruction(
SpirvDebugInstruction *di,
llvm::function_ref<bool(SpirvDebugInstruction *)> visitor);
};
} // end namespace spirv
} // end namespace clang
#endif // LLVM_CLANG_LIB_SPIRV_SORTDEBUGINFOVISITOR_H
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/SpirvModule.cpp | //===--- SpirvModule.cpp - SPIR-V Module Implementation ----------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/SPIRV/SpirvModule.h"
#include "clang/SPIRV/SpirvFunction.h"
#include "clang/SPIRV/SpirvVisitor.h"
namespace clang {
namespace spirv {
SpirvModule::SpirvModule()
: capabilities({}), extensions({}), extInstSets({}), memoryModel(nullptr),
entryPoints({}), executionModes({}), moduleProcesses({}), decorations({}),
constants({}), undefs({}), variables({}), functions({}),
debugInstructions({}), perVertexInterp(false) {}
SpirvModule::~SpirvModule() {
for (auto *cap : capabilities)
cap->releaseMemory();
for (auto *ext : extensions)
ext->releaseMemory();
for (auto *set : extInstSets)
set->releaseMemory();
if (memoryModel)
memoryModel->releaseMemory();
for (auto *entry : entryPoints)
entry->releaseMemory();
for (auto *exec : executionModes)
exec->releaseMemory();
for (auto *str : constStrings)
str->releaseMemory();
for (auto *d : sources)
d->releaseMemory();
for (auto *mp : moduleProcesses)
mp->releaseMemory();
for (auto *decoration : decorations)
decoration->releaseMemory();
for (auto *constant : constants)
constant->releaseMemory();
for (auto *undef : undefs)
undef->releaseMemory();
for (auto *var : variables)
var->releaseMemory();
for (auto *di : debugInstructions)
di->releaseMemory();
for (auto *f : allFunctions)
f->~SpirvFunction();
}
bool SpirvModule::invokeVisitor(Visitor *visitor, bool reverseOrder) {
  // Note: It is debatable whether reverse order of visiting the module should
  // reverse everything in this method. For the time being, we just reverse the
  // order of the function visitors and keep everything else the same. For
  // example, it is not clear what the value would be of visiting the last
  // function first. We can update this methodology if needed.
if (!visitor->visit(this, Visitor::Phase::Init))
return false;
if (reverseOrder) {
// Reverse order of a SPIR-V module.
    // Our transformations do not cross function boundaries; therefore the
    // order of visiting functions is not important.
for (auto iter = functions.rbegin(); iter != functions.rend(); ++iter) {
auto *fn = *iter;
if (!fn->invokeVisitor(visitor, reverseOrder))
return false;
}
for (auto iter = debugInstructions.rbegin();
iter != debugInstructions.rend(); ++iter) {
auto *debugInstruction = *iter;
if (!debugInstruction->invokeVisitor(visitor))
return false;
}
for (auto iter = variables.rbegin(); iter != variables.rend(); ++iter) {
auto *var = *iter;
if (!var->invokeVisitor(visitor))
return false;
}
for (auto iter = constants.rbegin(); iter != constants.rend(); ++iter) {
auto *constant = *iter;
if (!constant->invokeVisitor(visitor))
return false;
}
for (auto iter = undefs.rbegin(); iter != undefs.rend(); ++iter) {
auto *undef = *iter;
if (!undef->invokeVisitor(visitor))
return false;
}
// Since SetVector doesn't have 'rbegin()' and 'rend()' methods, we use
// manual indexing.
for (auto decorIndex = decorations.size(); decorIndex > 0; --decorIndex) {
auto *decoration = decorations[decorIndex - 1];
if (!decoration->invokeVisitor(visitor))
return false;
}
for (auto iter = moduleProcesses.rbegin(); iter != moduleProcesses.rend();
++iter) {
auto *moduleProcess = *iter;
if (!moduleProcess->invokeVisitor(visitor))
return false;
}
if (!sources.empty())
for (auto iter = sources.rbegin(); iter != sources.rend(); ++iter) {
auto *source = *iter;
if (!source->invokeVisitor(visitor))
return false;
}
for (auto iter = constStrings.rbegin(); iter != constStrings.rend();
++iter) {
if (!(*iter)->invokeVisitor(visitor))
return false;
}
for (auto iter = executionModes.rbegin(); iter != executionModes.rend();
++iter) {
auto *execMode = *iter;
if (!execMode->invokeVisitor(visitor))
return false;
}
for (auto iter = entryPoints.rbegin(); iter != entryPoints.rend(); ++iter) {
auto *entryPoint = *iter;
if (!entryPoint->invokeVisitor(visitor))
return false;
}
if (!memoryModel->invokeVisitor(visitor))
return false;
for (auto iter = extInstSets.rbegin(); iter != extInstSets.rend(); ++iter) {
auto *extInstSet = *iter;
if (!extInstSet->invokeVisitor(visitor))
return false;
}
// Since SetVector doesn't have 'rbegin()' and 'rend()' methods, we use
// manual indexing.
for (auto extIndex = extensions.size(); extIndex > 0; --extIndex) {
auto *extension = extensions[extIndex - 1];
if (!extension->invokeVisitor(visitor))
return false;
}
// Since SetVector doesn't have 'rbegin()' and 'rend()' methods, we use
// manual indexing.
for (auto capIndex = capabilities.size(); capIndex > 0; --capIndex) {
auto *capability = capabilities[capIndex - 1];
if (!capability->invokeVisitor(visitor))
return false;
}
}
// Traverse the regular order of a SPIR-V module.
else {
for (auto *cap : capabilities)
if (!cap->invokeVisitor(visitor))
return false;
for (auto ext : extensions)
if (!ext->invokeVisitor(visitor))
return false;
for (auto extInstSet : extInstSets)
if (!extInstSet->invokeVisitor(visitor))
return false;
if (!memoryModel->invokeVisitor(visitor))
return false;
for (auto entryPoint : entryPoints)
if (!entryPoint->invokeVisitor(visitor))
return false;
for (auto execMode : executionModes)
if (!execMode->invokeVisitor(visitor))
return false;
for (auto *str : constStrings)
if (!str->invokeVisitor(visitor))
return false;
if (!sources.empty())
for (auto *source : sources)
if (!source->invokeVisitor(visitor))
return false;
for (auto moduleProcess : moduleProcesses)
if (!moduleProcess->invokeVisitor(visitor))
return false;
for (auto decoration : decorations)
if (!decoration->invokeVisitor(visitor))
return false;
for (auto constant : constants)
if (!constant->invokeVisitor(visitor))
return false;
for (auto undef : undefs)
if (!undef->invokeVisitor(visitor))
return false;
for (auto var : variables)
if (!var->invokeVisitor(visitor))
return false;
for (size_t i = 0; i < debugInstructions.size(); i++)
if (!debugInstructions[i]->invokeVisitor(visitor))
return false;
for (auto fn : functions)
if (!fn->invokeVisitor(visitor, reverseOrder))
return false;
}
if (!visitor->visit(this, Visitor::Phase::Done))
return false;
return true;
}
void SpirvModule::addFunctionToListOfSortedModuleFunctions(SpirvFunction *fn) {
assert(fn && "cannot add null function to the module");
functions.push_back(fn);
}
void SpirvModule::addFunction(SpirvFunction *fn) {
assert(fn && "cannot add null function to the module");
allFunctions.insert(fn);
}
bool SpirvModule::addCapability(SpirvCapability *cap) {
assert(cap && "cannot add null capability to the module");
return capabilities.insert(cap);
}
bool SpirvModule::hasCapability(SpirvCapability &cap) {
return capabilities.count(&cap) != 0;
}
void SpirvModule::setMemoryModel(SpirvMemoryModel *model) {
assert(model && "cannot set a null memory model");
if (memoryModel)
memoryModel->releaseMemory();
memoryModel = model;
}
bool SpirvModule::promoteAddressingModel(spv::AddressingModel addrModel) {
assert(memoryModel && "base memory model must be set first");
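  // Addressing models are ordered from weakest to strongest; promotion only
  // ever moves up. For example, a module that starts out Logical can be
  // promoted to PhysicalStorageBuffer64, and a later request to "promote"
  // back down to Physical32 returns false and leaves the stronger model.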
auto getPriority = [](spv::AddressingModel am) -> int {
switch (am) {
default:
assert(false && "unknown addressing model");
return 0;
case spv::AddressingModel::Logical:
return 0;
case spv::AddressingModel::Physical32:
return 1;
case spv::AddressingModel::Physical64:
return 2;
case spv::AddressingModel::PhysicalStorageBuffer64:
return 3;
}
};
int current = getPriority(memoryModel->getAddressingModel());
int pending = getPriority(addrModel);
if (pending > current) {
memoryModel->setAddressingModel(addrModel);
return true;
} else {
return false;
}
}
void SpirvModule::addEntryPoint(SpirvEntryPoint *ep) {
assert(ep && "cannot add null as an entry point");
entryPoints.push_back(ep);
}
SpirvExecutionMode *SpirvModule::findExecutionMode(SpirvFunction *entryPoint,
spv::ExecutionMode em) {
for (SpirvExecutionMode *cem : executionModes) {
if (cem->getEntryPoint() != entryPoint)
continue;
if (cem->getExecutionMode() != em)
continue;
return cem;
}
return nullptr;
}
void SpirvModule::addExecutionMode(SpirvExecutionMode *em) {
assert(em && "cannot add null execution mode");
executionModes.push_back(em);
}
bool SpirvModule::addExtension(SpirvExtension *ext) {
assert(ext && "cannot add null extension");
return extensions.insert(ext);
}
void SpirvModule::addExtInstSet(SpirvExtInstImport *set) {
assert(set && "cannot add null extended instruction set");
extInstSets.push_back(set);
}
SpirvExtInstImport *SpirvModule::getExtInstSet(llvm::StringRef name) {
// We expect very few (usually 1) extended instruction sets to exist in the
// module, so this is not expensive.
auto found = std::find_if(extInstSets.begin(), extInstSets.end(),
[name](const SpirvExtInstImport *set) {
return set->getExtendedInstSetName() == name;
});
if (found != extInstSets.end())
return *found;
return nullptr;
}
void SpirvModule::addVariable(SpirvVariable *var) {
assert(var && "cannot add null variable to the module");
variables.push_back(var);
}
void SpirvModule::addDecoration(SpirvDecoration *decor) {
assert(decor && "cannot add null decoration to the module");
decorations.insert(decor);
}
void SpirvModule::addConstant(SpirvConstant *constant) {
assert(constant);
constants.push_back(constant);
}
void SpirvModule::addUndef(SpirvUndef *undef) {
assert(undef);
undefs.push_back(undef);
}
void SpirvModule::addString(SpirvString *str) {
assert(str);
constStrings.push_back(str);
}
void SpirvModule::addSource(SpirvSource *src) {
assert(src);
sources.push_back(src);
}
void SpirvModule::addDebugInfo(SpirvDebugInstruction *info) {
assert(info);
debugInstructions.push_back(info);
}
void SpirvModule::addModuleProcessed(SpirvModuleProcessed *p) {
assert(p);
moduleProcesses.push_back(p);
}
} // end namespace spirv
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/LowerTypeVisitor.h | //===--- LowerTypeVisitor.h - AST type to SPIR-V type visitor ----*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_SPIRV_LOWERTYPEVISITOR_H
#define LLVM_CLANG_LIB_SPIRV_LOWERTYPEVISITOR_H
#include "AlignmentSizeCalculator.h"
#include "clang/AST/ASTContext.h"
#include "clang/SPIRV/SpirvBuilder.h"
#include "clang/SPIRV/SpirvContext.h"
#include "clang/SPIRV/SpirvVisitor.h"
#include "llvm/ADT/Optional.h"
namespace clang {
namespace spirv {
/// The class responsible for translating Clang frontend types into SPIR-V
/// types.
class LowerTypeVisitor : public Visitor {
public:
LowerTypeVisitor(ASTContext &astCtx, SpirvContext &spvCtx,
const SpirvCodeGenOptions &opts, SpirvBuilder &builder)
: Visitor(opts, spvCtx), astContext(astCtx), spvContext(spvCtx),
alignmentCalc(astCtx, opts), useArrayForMat1xN(false),
spvBuilder(builder) {}
// Visiting different SPIR-V constructs.
bool visit(SpirvModule *, Phase) override { return true; }
bool visit(SpirvFunction *, Phase) override;
bool visit(SpirvBasicBlock *, Phase) override { return true; }
using Visitor::visit;
/// The "sink" visit function for all instructions.
///
  /// By default, all other visit functions redirect to this visit function.
  /// So if you want to handle all instructions, regardless of their
  /// polymorphism, override this visit function.
bool visitInstruction(SpirvInstruction *instr) override;
/// Lowers the given AST QualType into the corresponding SPIR-V type.
///
/// The lowering is recursive; all the types that the target type depends
/// on will be created in SpirvContext.
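  ///
  /// A hypothetical call site (sketch; `floatType` is assumed to be a scalar
  /// float QualType obtained from the ASTContext):
  ///
  ///   const SpirvType *ty = visitor.lowerType(
  ///       floatType, SpirvLayoutRule::Void, /*isRowMajor=*/llvm::None, loc);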
const SpirvType *lowerType(QualType type, SpirvLayoutRule,
llvm::Optional<bool> isRowMajor, SourceLocation);
bool useSpvArrayForHlslMat1xN() { return useArrayForMat1xN; }
private:
/// Emits error to the diagnostic engine associated with this visitor.
template <unsigned N>
DiagnosticBuilder emitError(const char (&message)[N],
SourceLocation srcLoc = {}) {
const auto diagId = astContext.getDiagnostics().getCustomDiagID(
clang::DiagnosticsEngine::Error, message);
return astContext.getDiagnostics().Report(srcLoc, diagId);
}
/// Lowers the given Hybrid type into a SPIR-V type.
///
/// Uses the above lowerType method to lower the QualType components of hybrid
/// types.
const SpirvType *lowerType(const SpirvType *, SpirvLayoutRule,
SourceLocation);
/// Lowers the given HLSL resource type into its SPIR-V type.
const SpirvType *lowerResourceType(QualType type, SpirvLayoutRule rule,
llvm::Optional<bool> isRowMajor,
SourceLocation);
/// Lowers the fields of a RecordDecl into SPIR-V StructType field
/// information.
llvm::SmallVector<StructType::FieldInfo, 4>
lowerStructFields(const RecordDecl *structType, SpirvLayoutRule rule);
/// Creates the default AST type from a TemplateName for HLSL templates
/// which have optional parameters (e.g. Texture2D).
QualType createASTTypeFromTemplateName(TemplateName templateName);
/// If the given type is an integral_constant or a Literal<integral_constant>,
/// return the constant value as a SpirvConstant, which will be set as a
/// literal constant if wrapped in Literal.
bool getVkIntegralConstantValue(QualType type, SpirvConstant *&result,
SourceLocation srcLoc);
/// Lowers the given vk::SpirvType or vk::SpirvOpaqueType into its SPIR-V
/// type.
const SpirvType *
lowerInlineSpirvType(llvm::StringRef name, unsigned int opcode,
const ClassTemplateSpecializationDecl *specDecl,
SpirvLayoutRule rule, llvm::Optional<bool> isRowMajor,
SourceLocation srcLoc);
/// Lowers the given type defined in vk namespace into its SPIR-V type.
const SpirvType *lowerVkTypeInVkNamespace(QualType type, llvm::StringRef name,
SpirvLayoutRule rule,
llvm::Optional<bool> isRowMajor,
SourceLocation srcLoc);
/// For the given sampled type, returns the corresponding image format
/// that can be used to create an image object.
spv::ImageFormat translateSampledTypeToImageFormat(QualType sampledType,
SourceLocation);
private:
/// Calculates all layout information needed for the given structure fields.
/// Returns the lowered field info vector.
/// In other words: lowers the HybridStructType field information to
/// StructType field information.
llvm::SmallVector<StructType::FieldInfo, 4>
populateLayoutInformation(llvm::ArrayRef<HybridStructType::FieldInfo> fields,
SpirvLayoutRule rule);
/// Create a clang::StructType::FieldInfo from HybridStructType::FieldInfo.
/// This function only considers the field as standalone.
/// Offset and layout constraint from the parent struct are not considered.
StructType::FieldInfo lowerField(const HybridStructType::FieldInfo *field,
SpirvLayoutRule rule,
const uint32_t fieldIndex);
private:
ASTContext &astContext; /// AST context
SpirvContext &spvContext; /// SPIR-V context
AlignmentSizeCalculator alignmentCalc; /// alignment calculator
bool useArrayForMat1xN; /// SPIR-V array for HLSL Matrix 1xN
SpirvBuilder &spvBuilder;
};
} // end namespace spirv
} // end namespace clang
#endif // LLVM_CLANG_LIB_SPIRV_LOWERTYPEVISITOR_H
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/SpirvBuilder.cpp | //===--- SpirvBuilder.cpp - SPIR-V Builder Implementation --------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/SPIRV/SpirvBuilder.h"
#include "CapabilityVisitor.h"
#include "DebugTypeVisitor.h"
#include "EmitVisitor.h"
#include "LiteralTypeVisitor.h"
#include "LowerTypeVisitor.h"
#include "NonUniformVisitor.h"
#include "PervertexInputVisitor.h"
#include "PreciseVisitor.h"
#include "RelaxedPrecisionVisitor.h"
#include "RemoveBufferBlockVisitor.h"
#include "SortDebugInfoVisitor.h"
#include "clang/SPIRV/AstTypeProbe.h"
#include "clang/SPIRV/String.h"
namespace clang {
namespace spirv {
SpirvBuilder::SpirvBuilder(ASTContext &ac, SpirvContext &ctx,
const SpirvCodeGenOptions &opt,
FeatureManager &featureMgr)
: astContext(ac), context(ctx), featureManager(featureMgr),
mod(llvm::make_unique<SpirvModule>()), function(nullptr),
moduleInit(nullptr), moduleInitInsertPoint(nullptr), spirvOptions(opt),
builtinVars(), debugNone(nullptr), nullDebugExpr(nullptr),
stringLiterals(), emptyString(nullptr) {}
SpirvFunction *SpirvBuilder::createSpirvFunction(QualType returnType,
SourceLocation loc,
llvm::StringRef name,
bool isPrecise,
bool isNoInline) {
auto *fn =
new (context) SpirvFunction(returnType, loc, name, isPrecise, isNoInline);
mod->addFunction(fn);
return fn;
}
SpirvFunction *SpirvBuilder::beginFunction(QualType returnType,
SourceLocation loc,
llvm::StringRef funcName,
bool isPrecise, bool isNoInline,
SpirvFunction *func) {
assert(!function && "found nested function");
if (func) {
function = func;
function->setAstReturnType(returnType);
function->setSourceLocation(loc);
function->setFunctionName(funcName);
function->setPrecise(isPrecise);
function->setNoInline(isNoInline);
} else {
function =
createSpirvFunction(returnType, loc, funcName, isPrecise, isNoInline);
}
return function;
}
SpirvFunctionParameter *
SpirvBuilder::addFnParam(QualType ptrType, bool isPrecise, bool isNointerp,
SourceLocation loc, llvm::StringRef name) {
assert(function && "found detached parameter");
SpirvFunctionParameter *param = nullptr;
if (isBindlessOpaqueArray(ptrType)) {
// If it is a bindless array of an opaque type, we have to use
// a pointer to a pointer of the runtime array.
param = new (context) SpirvFunctionParameter(
context.getPointerType(ptrType, spv::StorageClass::UniformConstant),
isPrecise, isNointerp, loc);
} else {
param = new (context)
SpirvFunctionParameter(ptrType, isPrecise, isNointerp, loc);
}
param->setStorageClass(spv::StorageClass::Function);
param->setDebugName(name);
function->addParameter(param);
return param;
}
SpirvVariable *SpirvBuilder::addFnVar(QualType valueType, SourceLocation loc,
llvm::StringRef name, bool isPrecise,
bool isNointerp, SpirvInstruction *init) {
assert(function && "found detached local variable");
SpirvVariable *var = nullptr;
if (isBindlessOpaqueArray(valueType)) {
// If it is a bindless array of an opaque type, we have to use
// a pointer to a pointer of the runtime array.
var = new (context) SpirvVariable(
context.getPointerType(valueType, spv::StorageClass::UniformConstant),
loc, spv::StorageClass::Function, isPrecise, isNointerp, init);
} else {
var =
new (context) SpirvVariable(valueType, loc, spv::StorageClass::Function,
isPrecise, isNointerp, init);
}
var->setDebugName(name);
function->addVariable(var);
return var;
}
void SpirvBuilder::endFunction() {
assert(function && "no active function");
mod->addFunctionToListOfSortedModuleFunctions(function);
function = nullptr;
insertPoint = nullptr;
}
SpirvBasicBlock *SpirvBuilder::createBasicBlock(llvm::StringRef name) {
assert(function && "found detached basic block");
auto *bb = new (context) SpirvBasicBlock(name);
function->addBasicBlock(bb);
if (auto *scope = context.getCurrentLexicalScope())
bb->setDebugScope(new (context) SpirvDebugScope(scope));
return bb;
}
SpirvDebugScope *SpirvBuilder::createDebugScope(SpirvDebugInstruction *scope) {
assert(insertPoint && "null insert point");
auto *dbgScope = new (context) SpirvDebugScope(scope);
insertPoint->addInstruction(dbgScope);
return dbgScope;
}
void SpirvBuilder::addSuccessor(SpirvBasicBlock *successorBB) {
assert(insertPoint && "null insert point");
insertPoint->addSuccessor(successorBB);
}
void SpirvBuilder::setMergeTarget(SpirvBasicBlock *mergeLabel) {
assert(insertPoint && "null insert point");
insertPoint->setMergeTarget(mergeLabel);
}
void SpirvBuilder::setContinueTarget(SpirvBasicBlock *continueLabel) {
assert(insertPoint && "null insert point");
insertPoint->setContinueTarget(continueLabel);
}
SpirvCompositeConstruct *SpirvBuilder::createCompositeConstruct(
QualType resultType, llvm::ArrayRef<SpirvInstruction *> constituents,
SourceLocation loc, SourceRange range) {
assert(insertPoint && "null insert point");
auto *instruction = new (context)
SpirvCompositeConstruct(resultType, loc, constituents, range);
insertPoint->addInstruction(instruction);
if (!constituents.empty()) {
instruction->setLayoutRule(constituents[0]->getLayoutRule());
}
return instruction;
}
SpirvCompositeExtract *SpirvBuilder::createCompositeExtract(
QualType resultType, SpirvInstruction *composite,
llvm::ArrayRef<uint32_t> indexes, SourceLocation loc, SourceRange range) {
assert(insertPoint && "null insert point");
auto *instruction = new (context)
SpirvCompositeExtract(resultType, loc, composite, indexes, range);
instruction->setRValue();
insertPoint->addInstruction(instruction);
return instruction;
}
SpirvCompositeInsert *SpirvBuilder::createCompositeInsert(
QualType resultType, SpirvInstruction *composite,
llvm::ArrayRef<uint32_t> indices, SpirvInstruction *object,
SourceLocation loc, SourceRange range) {
assert(insertPoint && "null insert point");
auto *instruction = new (context)
SpirvCompositeInsert(resultType, loc, composite, object, indices, range);
insertPoint->addInstruction(instruction);
return instruction;
}
SpirvVectorShuffle *SpirvBuilder::createVectorShuffle(
QualType resultType, SpirvInstruction *vector1, SpirvInstruction *vector2,
llvm::ArrayRef<uint32_t> selectors, SourceLocation loc, SourceRange range) {
assert(insertPoint && "null insert point");
auto *instruction = new (context)
SpirvVectorShuffle(resultType, loc, vector1, vector2, selectors, range);
instruction->setRValue();
insertPoint->addInstruction(instruction);
return instruction;
}
SpirvInstruction *SpirvBuilder::createLoad(QualType resultType,
SpirvInstruction *pointer,
SourceLocation loc,
SourceRange range) {
assert(insertPoint && "null insert point");
auto *instruction = new (context) SpirvLoad(resultType, loc, pointer, range);
instruction->setStorageClass(pointer->getStorageClass());
instruction->setLayoutRule(pointer->getLayoutRule());
instruction->setRValue(true);
if (pointer->containsAliasComponent() &&
isAKindOfStructuredOrByteBuffer(resultType)) {
instruction->setStorageClass(spv::StorageClass::Uniform);
// Now it is a pointer to the global resource, which is lvalue.
instruction->setRValue(false);
// Set to false to indicate that we've performed dereference over the
// pointer-to-pointer and now should fallback to the normal path
instruction->setContainsAliasComponent(false);
}
if (pointer->isRasterizerOrdered()) {
createBeginInvocationInterlockEXT(loc, range);
}
insertPoint->addInstruction(instruction);
if (pointer->isRasterizerOrdered()) {
createEndInvocationInterlockEXT(loc, range);
}
const auto &bitfieldInfo = pointer->getBitfieldInfo();
if (!bitfieldInfo.hasValue())
return instruction;
return createBitFieldExtract(resultType, instruction,
bitfieldInfo->offsetInBits,
bitfieldInfo->sizeInBits, loc, range);
}
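// For illustration, a sketch of the sequence emitted above when the pointer
// is rasterizer ordered (ids are hypothetical):
//   OpBeginInvocationInterlockEXT
//   %val = OpLoad %type %ptr
//   OpEndInvocationInterlockEXT
// If the pointer also carries bitfield info, the raw field loaded here is
// further narrowed via createBitFieldExtract before being returned.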
SpirvCopyObject *SpirvBuilder::createCopyObject(QualType resultType,
SpirvInstruction *pointer,
SourceLocation loc) {
assert(insertPoint && "null insert point");
auto *instruction = new (context) SpirvCopyObject(resultType, loc, pointer);
instruction->setStorageClass(pointer->getStorageClass());
instruction->setLayoutRule(pointer->getLayoutRule());
// The result of OpCopyObject is always an rvalue.
instruction->setRValue(true);
insertPoint->addInstruction(instruction);
return instruction;
}
SpirvLoad *SpirvBuilder::createLoad(const SpirvType *resultType,
SpirvInstruction *pointer,
SourceLocation loc, SourceRange range) {
assert(insertPoint && "null insert point");
auto *instruction =
new (context) SpirvLoad(/*QualType*/ {}, loc, pointer, range);
instruction->setResultType(resultType);
instruction->setStorageClass(pointer->getStorageClass());
// Special case for legalization. We could have pointer-to-pointer types.
// For example:
//
// %var = OpVariable %_ptr_Private__ptr_Uniform_type_X Private
// %1 = OpLoad %_ptr_Uniform_type_X %var
//
// Loading from %var should result in Uniform storage class, not Private.
if (const auto *ptrType = dyn_cast<SpirvPointerType>(resultType)) {
instruction->setStorageClass(ptrType->getStorageClass());
}
instruction->setLayoutRule(pointer->getLayoutRule());
instruction->setRValue(true);
insertPoint->addInstruction(instruction);
return instruction;
}
SpirvStore *SpirvBuilder::createStore(SpirvInstruction *address,
SpirvInstruction *value,
SourceLocation loc, SourceRange range) {
assert(insertPoint && "null insert point");
// Safeguard: if this assert fires, a bitfield value escaped without being
// extracted first.
assert(false == value->getBitfieldInfo().hasValue());
if (address->isRasterizerOrdered()) {
createBeginInvocationInterlockEXT(loc, range);
}
SpirvInstruction *source = value;
const auto &bitfieldInfo = address->getBitfieldInfo();
if (bitfieldInfo.hasValue()) {
// Generate SPIR-V type for value. This is required to know the final
// layout.
LowerTypeVisitor lowerTypeVisitor(astContext, context, spirvOptions, *this);
lowerTypeVisitor.visitInstruction(value);
context.addToInstructionsWithLoweredType(value);
auto *base = createLoad(value->getResultType(), address, loc, range);
source = createBitFieldInsert(/*QualType*/ {}, base, value,
bitfieldInfo->offsetInBits,
bitfieldInfo->sizeInBits, loc, range);
source->setResultType(value->getResultType());
}
auto *instruction =
new (context) SpirvStore(loc, address, source, llvm::None, range);
insertPoint->addInstruction(instruction);
if (address->isRasterizerOrdered()) {
createEndInvocationInterlockEXT(loc, range);
}
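// If we are storing a value loaded from a function parameter into a local
// variable, record the (variable, parameter pointer) pair on the function,
// presumably so that later passes can map the local copy back to the
// original parameter.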
if (isa<SpirvLoad>(value) && isa<SpirvVariable>(address)) {
auto paramPtr = dyn_cast<SpirvLoad>(value)->getPointer();
while (isa<SpirvAccessChain>(paramPtr)) {
paramPtr = dyn_cast<SpirvAccessChain>(paramPtr)->getBase();
}
if (isa<SpirvFunctionParameter>(paramPtr))
function->addFuncParamVarEntry(address,
dyn_cast<SpirvLoad>(value)->getPointer());
}
return instruction;
}
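// For illustration, a sketch of the read-modify-write sequence generated
// above when the address carries bitfield info (ids are hypothetical):
//   %raw    = OpLoad %uint %addr          ; load the whole raw field
//   %merged = ...                         ; bitfield-insert %value into %raw
//   OpStore %addr %merged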
SpirvFunctionCall *
SpirvBuilder::createFunctionCall(QualType returnType, SpirvFunction *func,
llvm::ArrayRef<SpirvInstruction *> params,
SourceLocation loc, SourceRange range) {
assert(insertPoint && "null insert point");
auto *instruction =
new (context) SpirvFunctionCall(returnType, loc, func, params, range);
instruction->setRValue(func->isRValue());
instruction->setContainsAliasComponent(func->constainsAliasComponent());
if (func->constainsAliasComponent() &&
isAKindOfStructuredOrByteBuffer(returnType)) {
instruction->setStorageClass(spv::StorageClass::Uniform);
// Now it is a pointer to the global resource, which is an lvalue.
instruction->setRValue(false);
// Set to false to indicate that we've performed a dereference over the
// pointer-to-pointer and should now fall back to the normal path.
instruction->setContainsAliasComponent(false);
}
insertPoint->addInstruction(instruction);
return instruction;
}
SpirvAccessChain *SpirvBuilder::createAccessChain(
const SpirvType *resultType, SpirvInstruction *base,
llvm::ArrayRef<SpirvInstruction *> indexes, SourceLocation loc) {
assert(insertPoint && "null insert point");
auto *instruction =
new (context) SpirvAccessChain(/*QualType*/ {}, loc, base, indexes);
instruction->setResultType(resultType);
instruction->setStorageClass(base->getStorageClass());
instruction->setLayoutRule(base->getLayoutRule());
instruction->setContainsAliasComponent(base->containsAliasComponent());
// If doing an access chain into a structured or byte address buffer, make
// sure the layout rule is sBufferLayoutRule.
if (base->hasAstResultType() &&
isAKindOfStructuredOrByteBuffer(base->getAstResultType()))
instruction->setLayoutRule(spirvOptions.sBufferLayoutRule);
insertPoint->addInstruction(instruction);
return instruction;
}
SpirvAccessChain *
SpirvBuilder::createAccessChain(QualType resultType, SpirvInstruction *base,
llvm::ArrayRef<SpirvInstruction *> indexes,
SourceLocation loc, SourceRange range) {
assert(insertPoint && "null insert point");
auto *instruction =
new (context) SpirvAccessChain(resultType, loc, base, indexes, range);
instruction->setStorageClass(base->getStorageClass());
instruction->setLayoutRule(base->getLayoutRule());
instruction->setContainsAliasComponent(base->containsAliasComponent());
// If doing an access chain into a structured or byte address buffer, make
// sure the layout rule is sBufferLayoutRule.
if (base->hasAstResultType() &&
isAKindOfStructuredOrByteBuffer(base->getAstResultType()))
instruction->setLayoutRule(spirvOptions.sBufferLayoutRule);
insertPoint->addInstruction(instruction);
return instruction;
}
SpirvUnaryOp *SpirvBuilder::createUnaryOp(spv::Op op, QualType resultType,
SpirvInstruction *operand,
SourceLocation loc,
SourceRange range) {
if (!operand)
return nullptr;
assert(insertPoint && "null insert point");
auto *instruction =
new (context) SpirvUnaryOp(op, resultType, loc, operand, range);
insertPoint->addInstruction(instruction);
instruction->setLayoutRule(operand->getLayoutRule());
return instruction;
}
SpirvUnaryOp *SpirvBuilder::createUnaryOp(spv::Op op,
const SpirvType *resultType,
SpirvInstruction *operand,
SourceLocation loc) {
if (!operand)
return nullptr;
assert(insertPoint && "null insert point");
auto *instruction = new (context) SpirvUnaryOp(op, resultType, loc, operand);
instruction->setLayoutRule(operand->getLayoutRule());
insertPoint->addInstruction(instruction);
return instruction;
}
SpirvBinaryOp *SpirvBuilder::createBinaryOp(spv::Op op, QualType resultType,
SpirvInstruction *lhs,
SpirvInstruction *rhs,
SourceLocation loc,
SourceRange range) {
assert(insertPoint && "null insert point");
auto *instruction =
new (context) SpirvBinaryOp(op, resultType, loc, lhs, rhs, range);
insertPoint->addInstruction(instruction);
return instruction;
}
SpirvSpecConstantBinaryOp *SpirvBuilder::createSpecConstantBinaryOp(
spv::Op op, QualType resultType, SpirvInstruction *lhs,
SpirvInstruction *rhs, SourceLocation loc) {
assert(insertPoint && "null insert point");
auto *instruction =
new (context) SpirvSpecConstantBinaryOp(op, resultType, loc, lhs, rhs);
insertPoint->addInstruction(instruction);
return instruction;
}
SpirvGroupNonUniformOp *SpirvBuilder::createGroupNonUniformOp(
spv::Op op, QualType resultType, spv::Scope execScope,
llvm::ArrayRef<SpirvInstruction *> operands, SourceLocation loc,
llvm::Optional<spv::GroupOperation> groupOp) {
assert(insertPoint && "null insert point");
auto *instruction = new (context)
SpirvGroupNonUniformOp(op, resultType, execScope, operands, loc, groupOp);
insertPoint->addInstruction(instruction);
return instruction;
}
SpirvAtomic *SpirvBuilder::createAtomicOp(
spv::Op opcode, QualType resultType, SpirvInstruction *originalValuePtr,
spv::Scope scope, spv::MemorySemanticsMask memorySemantics,
SpirvInstruction *valueToOp, SourceLocation loc, SourceRange range) {
assert(insertPoint && "null insert point");
auto *instruction =
new (context) SpirvAtomic(opcode, resultType, loc, originalValuePtr,
scope, memorySemantics, valueToOp, range);
insertPoint->addInstruction(instruction);
return instruction;
}
SpirvAtomic *SpirvBuilder::createAtomicCompareExchange(
QualType resultType, SpirvInstruction *originalValuePtr, spv::Scope scope,
spv::MemorySemanticsMask equalMemorySemantics,
spv::MemorySemanticsMask unequalMemorySemantics,
SpirvInstruction *valueToOp, SpirvInstruction *comparator,
SourceLocation loc, SourceRange range) {
assert(insertPoint && "null insert point");
auto *instruction = new (context)
SpirvAtomic(spv::Op::OpAtomicCompareExchange, resultType, loc,
originalValuePtr, scope, equalMemorySemantics,
unequalMemorySemantics, valueToOp, comparator, range);
insertPoint->addInstruction(instruction);
return instruction;
}
SpirvSampledImage *SpirvBuilder::createSampledImage(QualType imageType,
SpirvInstruction *image,
SpirvInstruction *sampler,
SourceLocation loc,
SourceRange range) {
assert(insertPoint && "null insert point");
auto *sampledImage =
new (context) SpirvSampledImage(imageType, loc, image, sampler, range);
insertPoint->addInstruction(sampledImage);
return sampledImage;
}
SpirvImageTexelPointer *SpirvBuilder::createImageTexelPointer(
QualType resultType, SpirvInstruction *image, SpirvInstruction *coordinate,
SpirvInstruction *sample, SourceLocation loc) {
assert(insertPoint && "null insert point");
auto *instruction = new (context)
SpirvImageTexelPointer(resultType, loc, image, coordinate, sample);
insertPoint->addInstruction(instruction);
return instruction;
}
spv::ImageOperandsMask SpirvBuilder::composeImageOperandsMask(
SpirvInstruction *bias, SpirvInstruction *lod,
const std::pair<SpirvInstruction *, SpirvInstruction *> &grad,
SpirvInstruction *constOffset, SpirvInstruction *varOffset,
SpirvInstruction *constOffsets, SpirvInstruction *sample,
SpirvInstruction *minLod) {
using spv::ImageOperandsMask;
// SPIR-V Image Operands from least significant bit to most significant bit
// Bias, Lod, Grad, ConstOffset, Offset, ConstOffsets, Sample, MinLod
auto mask = ImageOperandsMask::MaskNone;
if (bias) {
mask = mask | ImageOperandsMask::Bias;
}
if (lod) {
mask = mask | ImageOperandsMask::Lod;
}
if (grad.first && grad.second) {
mask = mask | ImageOperandsMask::Grad;
}
if (constOffset) {
mask = mask | ImageOperandsMask::ConstOffset;
}
if (varOffset) {
mask = mask | ImageOperandsMask::Offset;
}
if (constOffsets) {
mask = mask | ImageOperandsMask::ConstOffsets;
}
if (sample) {
mask = mask | ImageOperandsMask::Sample;
}
if (minLod) {
mask = mask | ImageOperandsMask::MinLod;
}
return mask;
}
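// For example, calling this with only bias and constOffset non-null yields
// Bias | ConstOffset, i.e. 0x1 | 0x8 = 0x9 in the mask encoding.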
SpirvInstruction *SpirvBuilder::createImageSample(
QualType texelType, QualType imageType, SpirvInstruction *image,
SpirvInstruction *sampler, SpirvInstruction *coordinate,
SpirvInstruction *compareVal, SpirvInstruction *bias, SpirvInstruction *lod,
std::pair<SpirvInstruction *, SpirvInstruction *> grad,
SpirvInstruction *constOffset, SpirvInstruction *varOffset,
SpirvInstruction *constOffsets, SpirvInstruction *sample,
SpirvInstruction *minLod, SpirvInstruction *residencyCode,
SourceLocation loc, SourceRange range) {
assert(insertPoint && "null insert point");
// The Lod and Grad image operands require explicit-lod instructions.
// Otherwise we use implicit-lod instructions.
const bool isExplicit = lod || (grad.first && grad.second);
const bool isSparse = (residencyCode != nullptr);
spv::Op op = spv::Op::Max;
if (compareVal) {
op = isExplicit ? (isSparse ? spv::Op::OpImageSparseSampleDrefExplicitLod
: spv::Op::OpImageSampleDrefExplicitLod)
: (isSparse ? spv::Op::OpImageSparseSampleDrefImplicitLod
: spv::Op::OpImageSampleDrefImplicitLod);
} else {
op = isExplicit ? (isSparse ? spv::Op::OpImageSparseSampleExplicitLod
: spv::Op::OpImageSampleExplicitLod)
: (isSparse ? spv::Op::OpImageSparseSampleImplicitLod
: spv::Op::OpImageSampleImplicitLod);
}
// minLod is only valid with implicit-lod instructions and Grad instructions.
// This means that we cannot have Lod and minLod together because Lod requires
// explicit-lod instructions. So at least one of lod and minLod must be null.
assert(lod == nullptr || minLod == nullptr);
// An OpSampledImage is required to do the image sampling.
auto *sampledImage =
createSampledImage(imageType, image, sampler, loc, range);
const auto mask = composeImageOperandsMask(
bias, lod, grad, constOffset, varOffset, constOffsets, sample, minLod);
auto *imageSampleInst = new (context) SpirvImageOp(
op, texelType, loc, sampledImage, coordinate, mask, compareVal, bias, lod,
grad.first, grad.second, constOffset, varOffset, constOffsets, sample,
minLod, nullptr, nullptr, range);
insertPoint->addInstruction(imageSampleInst);
if (isSparse) {
// Write the Residency Code
const auto status = createCompositeExtract(
astContext.UnsignedIntTy, imageSampleInst, {0}, loc, range);
createStore(residencyCode, status, loc, range);
// Extract the real result from the struct
return createCompositeExtract(texelType, imageSampleInst, {1}, loc, range);
}
return imageSampleInst;
}
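// For illustration, the sparse path above unpacks the sparse result struct
// like this (a sketch; ids are hypothetical):
//   %res    = OpImageSparseSampleImplicitLod %SparseResult %si %coord
//   %status = OpCompositeExtract %uint %res 0    ; residency code
//   %texel  = OpCompositeExtract %v4float %res 1 ; actual texel value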
SpirvInstruction *SpirvBuilder::createImageFetchOrRead(
bool doImageFetch, QualType texelType, QualType imageType,
SpirvInstruction *image, SpirvInstruction *coordinate,
SpirvInstruction *lod, SpirvInstruction *constOffset,
SpirvInstruction *constOffsets, SpirvInstruction *sample,
SpirvInstruction *residencyCode, SourceLocation loc, SourceRange range) {
assert(insertPoint && "null insert point");
const auto mask = composeImageOperandsMask(
/*bias*/ nullptr, lod, std::make_pair(nullptr, nullptr), constOffset,
/*varOffset*/ nullptr, constOffsets, sample, /*minLod*/ nullptr);
const bool isSparse = (residencyCode != nullptr);
spv::Op op =
doImageFetch
? (isSparse ? spv::Op::OpImageSparseFetch : spv::Op::OpImageFetch)
: (isSparse ? spv::Op::OpImageSparseRead : spv::Op::OpImageRead);
auto *fetchOrReadInst = new (context)
SpirvImageOp(op, texelType, loc, image, coordinate, mask,
/*dref*/ nullptr, /*bias*/ nullptr, lod, /*gradDx*/ nullptr,
/*gradDy*/ nullptr, constOffset, /*varOffset*/ nullptr,
constOffsets, sample, nullptr, nullptr, nullptr, range);
insertPoint->addInstruction(fetchOrReadInst);
if (isSparse) {
// Write the Residency Code
const auto status = createCompositeExtract(
astContext.UnsignedIntTy, fetchOrReadInst, {0}, loc, range);
createStore(residencyCode, status, loc, range);
// Extract the real result from the struct
return createCompositeExtract(texelType, fetchOrReadInst, {1}, loc, range);
}
return fetchOrReadInst;
}
void SpirvBuilder::createImageWrite(QualType imageType, SpirvInstruction *image,
SpirvInstruction *coord,
SpirvInstruction *texel, SourceLocation loc,
SourceRange range) {
assert(insertPoint && "null insert point");
auto *writeInst = new (context) SpirvImageOp(
spv::Op::OpImageWrite, imageType, loc, image, coord,
spv::ImageOperandsMask::MaskNone,
/*dref*/ nullptr, /*bias*/ nullptr, /*lod*/ nullptr, /*gradDx*/ nullptr,
/*gradDy*/ nullptr, /*constOffset*/ nullptr, /*varOffset*/ nullptr,
/*constOffsets*/ nullptr, /*sample*/ nullptr, /*minLod*/ nullptr,
/*component*/ nullptr, texel, range);
insertPoint->addInstruction(writeInst);
}
SpirvInstruction *SpirvBuilder::createImageGather(
QualType texelType, QualType imageType, SpirvInstruction *image,
SpirvInstruction *sampler, SpirvInstruction *coordinate,
SpirvInstruction *component, SpirvInstruction *compareVal,
SpirvInstruction *constOffset, SpirvInstruction *varOffset,
SpirvInstruction *constOffsets, SpirvInstruction *sample,
SpirvInstruction *residencyCode, SourceLocation loc, SourceRange range) {
assert(insertPoint && "null insert point");
// An OpSampledImage is required to do the image sampling.
auto *sampledImage =
createSampledImage(imageType, image, sampler, loc, range);
// TODO: Update ImageGather to accept minLod if necessary.
const auto mask = composeImageOperandsMask(
/*bias*/ nullptr, /*lod*/ nullptr, std::make_pair(nullptr, nullptr),
constOffset, varOffset, constOffsets, sample, /*minLod*/ nullptr);
spv::Op op = compareVal ? (residencyCode ? spv::Op::OpImageSparseDrefGather
: spv::Op::OpImageDrefGather)
: (residencyCode ? spv::Op::OpImageSparseGather
: spv::Op::OpImageGather);
// Note: OpImageSparseDrefGather and OpImageDrefGather do not take the
// component parameter.
if (compareVal)
component = nullptr;
auto *imageInstruction = new (context) SpirvImageOp(
op, texelType, loc, sampledImage, coordinate, mask, compareVal,
/*bias*/ nullptr, /*lod*/ nullptr, /*gradDx*/ nullptr,
/*gradDy*/ nullptr, constOffset, varOffset, constOffsets, sample,
/*minLod*/ nullptr, component, nullptr, range);
insertPoint->addInstruction(imageInstruction);
if (residencyCode) {
// Write the Residency Code
const auto status = createCompositeExtract(astContext.UnsignedIntTy,
imageInstruction, {0}, loc);
createStore(residencyCode, status, loc);
// Extract the real result from the struct
return createCompositeExtract(texelType, imageInstruction, {1}, loc);
}
return imageInstruction;
}
SpirvImageSparseTexelsResident *SpirvBuilder::createImageSparseTexelsResident(
SpirvInstruction *residentCode, SourceLocation loc, SourceRange range) {
assert(insertPoint && "null insert point");
auto *inst = new (context) SpirvImageSparseTexelsResident(
astContext.BoolTy, loc, residentCode, range);
insertPoint->addInstruction(inst);
return inst;
}
SpirvImageQuery *
SpirvBuilder::createImageQuery(spv::Op opcode, QualType resultType,
SourceLocation loc, SpirvInstruction *image,
SpirvInstruction *lod, SourceRange range) {
assert(insertPoint && "null insert point");
SpirvInstruction *lodParam = nullptr;
SpirvInstruction *coordinateParam = nullptr;
if (opcode == spv::Op::OpImageQuerySizeLod)
lodParam = lod;
if (opcode == spv::Op::OpImageQueryLod)
coordinateParam = lod;
auto *inst = new (context) SpirvImageQuery(opcode, resultType, loc, image,
lodParam, coordinateParam, range);
insertPoint->addInstruction(inst);
return inst;
}
SpirvSelect *SpirvBuilder::createSelect(QualType resultType,
SpirvInstruction *condition,
SpirvInstruction *trueValue,
SpirvInstruction *falseValue,
SourceLocation loc, SourceRange range) {
assert(insertPoint && "null insert point");
auto *inst = new (context)
SpirvSelect(resultType, loc, condition, trueValue, falseValue, range);
insertPoint->addInstruction(inst);
return inst;
}
void SpirvBuilder::createSwitch(
SpirvBasicBlock *mergeLabel, SpirvInstruction *selector,
SpirvBasicBlock *defaultLabel,
llvm::ArrayRef<std::pair<llvm::APInt, SpirvBasicBlock *>> target,
SourceLocation loc, SourceRange range) {
assert(insertPoint && "null insert point");
// Create the OpSelectionMerge.
auto *selectionMerge = new (context) SpirvSelectionMerge(
loc, mergeLabel, spv::SelectionControlMask::MaskNone, range);
insertPoint->addInstruction(selectionMerge);
// Create the OpSwitch.
auto *switchInst =
new (context) SpirvSwitch(loc, selector, defaultLabel, target);
insertPoint->addInstruction(switchInst);
}
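// For illustration, a sketch of the structured switch emitted above (ids
// and case literals are hypothetical):
//   OpSelectionMerge %merge None
//   OpSwitch %selector %default 1 %case1 2 %case2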
void SpirvBuilder::createKill(SourceLocation loc, SourceRange range) {
assert(insertPoint && "null insert point");
auto *kill = new (context) SpirvKill(loc, range);
insertPoint->addInstruction(kill);
}
void SpirvBuilder::createBranch(SpirvBasicBlock *targetLabel,
SourceLocation loc, SpirvBasicBlock *mergeBB,
SpirvBasicBlock *continueBB,
spv::LoopControlMask loopControl,
SourceRange range) {
assert(insertPoint && "null insert point");
if (mergeBB && continueBB) {
auto *loopMerge = new (context)
SpirvLoopMerge(loc, mergeBB, continueBB, loopControl, range);
insertPoint->addInstruction(loopMerge);
}
auto *branch = new (context) SpirvBranch(loc, targetLabel, range);
insertPoint->addInstruction(branch);
}
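// For illustration, when both mergeBB and continueBB are provided, this
// emits a loop header (a sketch; ids are hypothetical):
//   OpLoopMerge %merge %continue None
//   OpBranch %target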
void SpirvBuilder::createConditionalBranch(
SpirvInstruction *condition, SpirvBasicBlock *trueLabel,
SpirvBasicBlock *falseLabel, SourceLocation loc,
SpirvBasicBlock *mergeLabel, SpirvBasicBlock *continueLabel,
spv::SelectionControlMask selectionControl,
spv::LoopControlMask loopControl, SourceRange range) {
assert(insertPoint && "null insert point");
if (mergeLabel) {
if (continueLabel) {
auto *loopMerge = new (context)
SpirvLoopMerge(loc, mergeLabel, continueLabel, loopControl, range);
insertPoint->addInstruction(loopMerge);
} else {
auto *selectionMerge = new (context)
SpirvSelectionMerge(loc, mergeLabel, selectionControl, range);
insertPoint->addInstruction(selectionMerge);
}
}
auto *branchConditional = new (context)
SpirvBranchConditional(loc, condition, trueLabel, falseLabel);
insertPoint->addInstruction(branchConditional);
}
void SpirvBuilder::createReturn(SourceLocation loc, SourceRange range) {
assert(insertPoint && "null insert point");
insertPoint->addInstruction(new (context) SpirvReturn(loc, nullptr, range));
}
void SpirvBuilder::createReturnValue(SpirvInstruction *value,
SourceLocation loc, SourceRange range) {
assert(insertPoint && "null insert point");
insertPoint->addInstruction(new (context) SpirvReturn(loc, value, range));
}
SpirvInstruction *
SpirvBuilder::createGLSLExtInst(QualType resultType, GLSLstd450 inst,
llvm::ArrayRef<SpirvInstruction *> operands,
SourceLocation loc, SourceRange range) {
assert(insertPoint && "null insert point");
auto *extInst = new (context) SpirvExtInst(
resultType, loc, getExtInstSet("GLSL.std.450"), inst, operands, range);
insertPoint->addInstruction(extInst);
return extInst;
}
SpirvInstruction *
SpirvBuilder::createGLSLExtInst(const SpirvType *resultType, GLSLstd450 inst,
llvm::ArrayRef<SpirvInstruction *> operands,
SourceLocation loc, SourceRange range) {
assert(insertPoint && "null insert point");
auto *extInst = new (context) SpirvExtInst(
/*QualType*/ {}, loc, getExtInstSet("GLSL.std.450"), inst, operands,
range);
extInst->setResultType(resultType);
insertPoint->addInstruction(extInst);
return extInst;
}
SpirvInstruction *SpirvBuilder::createNonSemanticDebugPrintfExtInst(
QualType resultType, NonSemanticDebugPrintfInstructions instId,
llvm::ArrayRef<SpirvInstruction *> operands, SourceLocation loc) {
assert(insertPoint && "null insert point");
auto *extInst = new (context)
SpirvExtInst(resultType, loc, getExtInstSet("NonSemantic.DebugPrintf"),
instId, operands);
insertPoint->addInstruction(extInst);
return extInst;
}
void SpirvBuilder::createBarrier(spv::Scope memoryScope,
spv::MemorySemanticsMask memorySemantics,
llvm::Optional<spv::Scope> exec,
SourceLocation loc, SourceRange range) {
assert(insertPoint && "null insert point");
SpirvBarrier *barrier = new (context)
SpirvBarrier(loc, memoryScope, memorySemantics, exec, range);
insertPoint->addInstruction(barrier);
}
SpirvInstruction *SpirvBuilder::createEmulatedBitFieldInsert(
QualType resultType, uint32_t baseTypeBitwidth, SpirvInstruction *base,
SpirvInstruction *insert, unsigned bitOffset, unsigned bitCount,
SourceLocation loc, SourceRange range) {
// The destination is a raw struct field, which can contain several bitfields:
// raw field: AAAABBBBCCCCCCCCDDDD
// To insert a new value for the field BBBB, we need to clear the B bits in
// the field, and insert the new value.
// Create a mask to select B from the raw field.
// mask = (1 << bitCount) - 1
// raw field: AAAABBBBCCCCCCCCDDDD
// mask:      00000000000000001111
// Cast the mask to an unsigned type with the same bitwidth.
// mask = (unsigned dstType)mask
// Move the mask to B's position in the raw type.
// mask = mask << bitOffset
// raw field: AAAABBBBCCCCCCCCDDDD
// mask:      00001111000000000000
// Generate the inverted mask, used to clear B's bits in the raw field.
// notMask = ~mask
// raw field: AAAABBBBCCCCCCCCDDDD
// notMask:   11110000111111111111
assert(bitCount <= 64 &&
"Bitfield insertion emulation can only insert at most 64 bits.");
auto maskTy =
astContext.getIntTypeForBitwidth(baseTypeBitwidth, /* signed= */ 0);
const uint64_t maskValue = ((1ull << bitCount) - 1ull) << bitOffset;
const uint64_t notMaskValue = ~maskValue;
auto *mask = getConstantInt(maskTy, llvm::APInt(baseTypeBitwidth, maskValue));
auto *notMask =
getConstantInt(maskTy, llvm::APInt(baseTypeBitwidth, notMaskValue));
auto *shiftOffset =
getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, bitOffset));
// base = base & notMask // Clear bits at B's position.
// input: AAAABBBBCCCCCCCCDDDD
// output: AAAA----CCCCCCCCDDDD
auto *clearedDst = createBinaryOp(spv::Op::OpBitwiseAnd, resultType, base,
notMask, loc, range);
// input: SSSSSSSSSSSSSSSSBBBB
// tmp = (dstType)SRC // Convert SRC to the base type.
// tmp = tmp << bitOffset // Move the SRC value to the correct bit offset.
// output: SSSSBBBB------------
// tmp = tmp & mask // Clear any sign-extension bits.
// output: ----BBBB------------
auto *castedSrc =
createUnaryOp(spv::Op::OpBitcast, resultType, insert, loc, range);
auto *shiftedSrc = createBinaryOp(spv::Op::OpShiftLeftLogical, resultType,
castedSrc, shiftOffset, loc, range);
auto *maskedSrc = createBinaryOp(spv::Op::OpBitwiseAnd, resultType,
shiftedSrc, mask, loc, range);
// base = base | tmp; // Insert B in the raw field.
// tmp: ----BBBB------------
// base: AAAA----CCCCCCCCDDDD
// output: AAAABBBBCCCCCCCCDDDD
auto *result = createBinaryOp(spv::Op::OpBitwiseOr, resultType, clearedDst,
maskedSrc, loc, range);
if (base->getResultType()) {
auto *dstTy = dyn_cast<IntegerType>(base->getResultType());
clearedDst->setResultType(dstTy);
shiftedSrc->setResultType(dstTy);
maskedSrc->setResultType(dstTy);
castedSrc->setResultType(dstTy);
result->setResultType(dstTy);
}
return result;
}
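// Worked example (an illustrative sketch): inserting a 4-bit field at bit
// offset 12 of a 32-bit base gives
//   maskValue    = ((1 << 4) - 1) << 12 = 0x0000F000
//   notMaskValue = ~maskValue           = 0xFFFF0FFF
// so base & notMask clears the field, and (insert << 12) & mask supplies
// the new bits.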
SpirvInstruction *
SpirvBuilder::createBitFieldInsert(QualType resultType, SpirvInstruction *base,
SpirvInstruction *insert, unsigned bitOffset,
unsigned bitCount, SourceLocation loc,
SourceRange range) {
assert(insertPoint && "null insert point");
uint32_t bitwidth = 0;
if (resultType == QualType({})) {
assert(base->hasResultType() && "No type information for bitfield.");
bitwidth = dyn_cast<IntegerType>(base->getResultType())->getBitwidth();
} else {
bitwidth = getElementSpirvBitwidth(astContext, resultType,
spirvOptions.enable16BitTypes);
}
if (bitwidth != 32)
return createEmulatedBitFieldInsert(resultType, bitwidth, base, insert,
bitOffset, bitCount, loc, range);
auto *insertOffset =
getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, bitOffset));
auto *insertCount =
getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, bitCount));
auto *inst = new (context) SpirvBitFieldInsert(resultType, loc, base, insert,
insertOffset, insertCount);
insertPoint->addInstruction(inst);
inst->setRValue(true);
return inst;
}
SpirvInstruction *SpirvBuilder::createEmulatedBitFieldExtract(
QualType resultType, uint32_t baseTypeBitwidth, SpirvInstruction *base,
unsigned bitOffset, unsigned bitCount, SourceLocation loc,
SourceRange range) {
assert(bitCount <= 64 &&
"Bitfield extraction emulation can only extract at most 64 bits.");
// The base is a raw struct field, which can contain several bitfields:
// raw field: AAAABBBBCCCCCCCCDDDD
// Extracting B means shifting it right until B's LSB is the basetype LSB.
// But first, we need to left shift until B's MSB becomes the basetype MSB:
// - if B is signed, its sign bits won't necessarily extend up to the
// basetype MSB,
// - meaning a right-shift could fail to sign-extend,
// - so shifting left first, then right, makes sure the sign extension
// happens.
// input: AAAABBBBCCCCCCCCDDDD
// output: BBBBCCCCCCCCDDDD0000
auto *leftShiftOffset =
getConstantInt(astContext.UnsignedIntTy,
llvm::APInt(32, baseTypeBitwidth - bitOffset - bitCount));
auto *leftShift = createBinaryOp(spv::Op::OpShiftLeftLogical, resultType,
base, leftShiftOffset, loc, range);
// input: BBBBCCCCCCCCDDDD0000
// output: SSSSSSSSSSSSSSSSBBBB
auto *rightShiftOffset = getConstantInt(
astContext.UnsignedIntTy, llvm::APInt(32, baseTypeBitwidth - bitCount));
auto *rightShift = createBinaryOp(spv::Op::OpShiftRightArithmetic, resultType,
leftShift, rightShiftOffset, loc, range);
if (resultType == QualType({})) {
auto baseType = dyn_cast<IntegerType>(base->getResultType());
leftShift->setResultType(baseType);
rightShift->setResultType(baseType);
}
return rightShift;
}
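// Worked example (an illustrative sketch): extracting a 4-bit field at bit
// offset 8 from a 64-bit base shifts left by 64 - 8 - 4 = 52, placing the
// field's MSB at bit 63, then arithmetic-shifts right by 64 - 4 = 60, which
// sign-extends the field over the full width.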
SpirvInstruction *
SpirvBuilder::createBitFieldExtract(QualType resultType, SpirvInstruction *base,
unsigned bitOffset, unsigned bitCount,
SourceLocation loc, SourceRange range) {
assert(insertPoint && "null insert point");
uint32_t bitWidth = 0;
if (resultType == QualType({})) {
assert(base->hasResultType() && "No type information for bitfield.");
bitWidth = dyn_cast<IntegerType>(base->getResultType())->getBitwidth();
} else {
bitWidth = getElementSpirvBitwidth(astContext, resultType,
spirvOptions.enable16BitTypes);
}
if (bitWidth != 32)
return createEmulatedBitFieldExtract(resultType, bitWidth, base, bitOffset,
bitCount, loc, range);
auto *offset =
getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, bitOffset));
auto *count =
getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, bitCount));
auto *inst =
new (context) SpirvBitFieldExtract(resultType, loc, base, offset, count);
insertPoint->addInstruction(inst);
inst->setRValue(true);
return inst;
}
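// For the native 32-bit path, this lowers to a single bitfield-extract
// instruction (a sketch; ids are hypothetical, and the signed/unsigned
// opcode choice is made by SpirvBitFieldExtract itself):
//   %r = OpBitFieldUExtract %uint %base %offset %count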
void SpirvBuilder::createEmitVertex(SourceLocation loc, SourceRange range) {
assert(insertPoint && "null insert point");
auto *inst = new (context) SpirvEmitVertex(loc, range);
insertPoint->addInstruction(inst);
}
void SpirvBuilder::createEndPrimitive(SourceLocation loc, SourceRange range) {
assert(insertPoint && "null insert point");
auto *inst = new (context) SpirvEndPrimitive(loc, range);
insertPoint->addInstruction(inst);
}
/// \brief Creates an OpEmitMeshTasksEXT instruction.
void SpirvBuilder::createEmitMeshTasksEXT(
SpirvInstruction *xDim, SpirvInstruction *yDim, SpirvInstruction *zDim,
SourceLocation loc, SpirvInstruction *payload, SourceRange range) {
assert(insertPoint && "null insert point");
auto *inst = new (context)
SpirvEmitMeshTasksEXT(xDim, yDim, zDim, payload, loc, range);
insertPoint->addInstruction(inst);
}
/// \brief Creates an OpSetMeshOutputsEXT instruction.
void SpirvBuilder::createSetMeshOutputsEXT(SpirvInstruction *vertCount,
SpirvInstruction *primCount,
SourceLocation loc,
SourceRange range) {
assert(insertPoint && "null insert point");
auto *inst =
new (context) SpirvSetMeshOutputsEXT(vertCount, primCount, loc, range);
insertPoint->addInstruction(inst);
}
SpirvArrayLength *SpirvBuilder::createArrayLength(QualType resultType,
SourceLocation loc,
SpirvInstruction *structure,
uint32_t arrayMember,
SourceRange range) {
assert(insertPoint && "null insert point");
auto *inst = new (context)
SpirvArrayLength(resultType, loc, structure, arrayMember, range);
insertPoint->addInstruction(inst);
return inst;
}
SpirvInstruction *
SpirvBuilder::createRayTracingOpsNV(spv::Op opcode, QualType resultType,
ArrayRef<SpirvInstruction *> operands,
SourceLocation loc) {
assert(insertPoint && "null insert point");
auto *inst =
new (context) SpirvRayTracingOpNV(resultType, opcode, operands, loc);
insertPoint->addInstruction(inst);
return inst;
}
SpirvInstruction *
SpirvBuilder::createDemoteToHelperInvocation(SourceLocation loc) {
assert(insertPoint && "null insert point");
auto *inst = new (context) SpirvDemoteToHelperInvocation(loc);
insertPoint->addInstruction(inst);
return inst;
}
SpirvInstruction *
SpirvBuilder::createIsHelperInvocationEXT(QualType type, SourceLocation loc) {
assert(insertPoint && "null insert point");
auto *inst = new (context) SpirvIsHelperInvocationEXT(type, loc);
insertPoint->addInstruction(inst);
return inst;
}
SpirvDebugSource *SpirvBuilder::createDebugSource(llvm::StringRef file,
llvm::StringRef text) {
auto *inst = new (context) SpirvDebugSource(file, text);
mod->addDebugInfo(inst);
return inst;
}
SpirvDebugCompilationUnit *
SpirvBuilder::createDebugCompilationUnit(SpirvDebugSource *source) {
auto *inst = new (context) SpirvDebugCompilationUnit(
/*version*/ 1, /*DWARF version*/ 4, source);
mod->addDebugInfo(inst);
return inst;
}
void SpirvBuilder::createDebugEntryPoint(SpirvDebugFunction *ep,
SpirvDebugCompilationUnit *cu,
llvm::StringRef signature,
llvm::StringRef args) {
auto *inst = new (context) SpirvDebugEntryPoint(ep, cu, signature, args);
mod->addDebugInfo(inst);
}
SpirvDebugLexicalBlock *
SpirvBuilder::createDebugLexicalBlock(SpirvDebugSource *source, uint32_t line,
uint32_t column,
SpirvDebugInstruction *parent) {
assert(insertPoint && "null insert point");
auto *inst =
new (context) SpirvDebugLexicalBlock(source, line, column, parent);
mod->addDebugInfo(inst);
return inst;
}
SpirvDebugLocalVariable *SpirvBuilder::createDebugLocalVariable(
QualType debugQualType, llvm::StringRef varName, SpirvDebugSource *src,
uint32_t line, uint32_t column, SpirvDebugInstruction *parentScope,
uint32_t flags, llvm::Optional<uint32_t> argNumber) {
auto *inst = new (context) SpirvDebugLocalVariable(
debugQualType, varName, src, line, column, parentScope, flags, argNumber);
mod->addDebugInfo(inst);
return inst;
}
SpirvDebugGlobalVariable *SpirvBuilder::createDebugGlobalVariable(
QualType debugType, llvm::StringRef varName, SpirvDebugSource *src,
uint32_t line, uint32_t column, SpirvDebugInstruction *parentScope,
llvm::StringRef linkageName, SpirvVariable *var, uint32_t flags,
llvm::Optional<SpirvInstruction *> staticMemberDebugType) {
auto *inst = new (context) SpirvDebugGlobalVariable(
debugType, varName, src, line, column, parentScope, linkageName, var,
flags, staticMemberDebugType);
mod->addDebugInfo(inst);
return inst;
}
SpirvDebugInfoNone *SpirvBuilder::getOrCreateDebugInfoNone() {
if (debugNone)
return debugNone;
debugNone = new (context) SpirvDebugInfoNone();
mod->addDebugInfo(debugNone);
return debugNone;
}
SpirvDebugExpression *SpirvBuilder::getOrCreateNullDebugExpression() {
if (nullDebugExpr)
return nullDebugExpr;
nullDebugExpr = new (context) SpirvDebugExpression();
mod->addDebugInfo(nullDebugExpr);
return nullDebugExpr;
}
SpirvDebugDeclare *SpirvBuilder::createDebugDeclare(
SpirvDebugLocalVariable *dbgVar, SpirvInstruction *var, SourceLocation loc,
SourceRange range, llvm::Optional<SpirvDebugExpression *> dbgExpr) {
auto *decl = new (context)
SpirvDebugDeclare(dbgVar, var,
dbgExpr.hasValue() ? dbgExpr.getValue()
: getOrCreateNullDebugExpression(),
loc, range);
if (isa<SpirvFunctionParameter>(var)) {
assert(function && "found detached parameter");
function->addParameterDebugDeclare(decl);
} else {
assert(insertPoint && "null insert point");
insertPoint->addInstruction(decl);
}
return decl;
}
SpirvDebugFunction *SpirvBuilder::createDebugFunction(
const FunctionDecl *decl, llvm::StringRef name, SpirvDebugSource *src,
uint32_t line, uint32_t column, SpirvDebugInstruction *parentScope,
llvm::StringRef linkageName, uint32_t flags, uint32_t scopeLine,
SpirvFunction *fn) {
auto *inst = new (context) SpirvDebugFunction(
name, src, line, column, parentScope, linkageName, flags, scopeLine, fn);
mod->addDebugInfo(inst);
context.registerDebugFunctionForDecl(decl, inst);
return inst;
}
SpirvDebugFunctionDefinition *
SpirvBuilder::createDebugFunctionDef(SpirvDebugFunction *function,
SpirvFunction *fn) {
auto *inst = new (context) SpirvDebugFunctionDefinition(function, fn);
assert(insertPoint && "null insert point");
insertPoint->addInstruction(inst);
return inst;
}
SpirvInstruction *SpirvBuilder::createRayQueryOpsKHR(
spv::Op opcode, QualType resultType, ArrayRef<SpirvInstruction *> operands,
bool cullFlags, SourceLocation loc, SourceRange range) {
assert(insertPoint && "null insert point");
auto *inst = new (context)
SpirvRayQueryOpKHR(resultType, opcode, operands, cullFlags, loc, range);
insertPoint->addInstruction(inst);
return inst;
}
SpirvInstruction *SpirvBuilder::createReadClock(SpirvInstruction *scope,
SourceLocation loc) {
assert(insertPoint && "null insert point");
assert(scope->getAstResultType()->isIntegerType());
auto *inst =
new (context) SpirvReadClock(astContext.UnsignedLongLongTy, scope, loc);
insertPoint->addInstruction(inst);
return inst;
}
SpirvInstruction *SpirvBuilder::createSpirvIntrInstExt(
uint32_t opcode, QualType retType,
llvm::ArrayRef<SpirvInstruction *> operands,
llvm::ArrayRef<llvm::StringRef> extensions, llvm::StringRef instSet,
llvm::ArrayRef<uint32_t> capabilities, SourceLocation loc) {
assert(insertPoint && "null insert point");
SpirvExtInstImport *set =
(instSet.size() == 0) ? nullptr : getExtInstSet(instSet);
if (retType != QualType() && retType->isVoidType()) {
retType = QualType();
}
auto *inst = new (context) SpirvIntrinsicInstruction(
retType, opcode, operands, extensions, set, capabilities, loc);
insertPoint->addInstruction(inst);
return inst;
}
void SpirvBuilder::createBeginInvocationInterlockEXT(SourceLocation loc,
SourceRange range) {
assert(insertPoint && "null insert point");
auto *inst = new (context)
SpirvNullaryOp(spv::Op::OpBeginInvocationInterlockEXT, loc, range);
insertPoint->addInstruction(inst);
}
void SpirvBuilder::createEndInvocationInterlockEXT(SourceLocation loc,
SourceRange range) {
assert(insertPoint && "null insert point");
auto *inst = new (context)
SpirvNullaryOp(spv::Op::OpEndInvocationInterlockEXT, loc, range);
insertPoint->addInstruction(inst);
}
void SpirvBuilder::createRaytracingTerminateKHR(spv::Op opcode,
SourceLocation loc) {
assert(insertPoint && "null insert point");
auto *inst = new (context) SpirvRayTracingTerminateOpKHR(opcode, loc);
insertPoint->addInstruction(inst);
}
void SpirvBuilder::createCopyArrayInFxcCTBufferToClone(
const ArrayType *fxcCTBufferArrTy, SpirvInstruction *fxcCTBuffer,
const SpirvType *cloneType, SpirvInstruction *clone, SourceLocation loc) {
const SpirvPointerType *cloneElemPtrTy = nullptr;
const SpirvPointerType *fxcCTBufferElemPtrTy = nullptr;
if (auto *cloneArrTy = dyn_cast<ArrayType>(cloneType)) {
assert(fxcCTBufferArrTy->getElementCount() ==
cloneArrTy->getElementCount());
cloneElemPtrTy = context.getPointerType(cloneArrTy->getElementType(),
clone->getStorageClass());
fxcCTBufferElemPtrTy = context.getPointerType(
fxcCTBufferArrTy->getElementType(), fxcCTBuffer->getStorageClass());
} else if (auto *cloneVecTy = dyn_cast<VectorType>(cloneType)) {
// A float1xN matrix must be laid out as float[N] when filling CTBuffer
// data, but it is used as a vector of N floats in SPIR-V instructions.
assert(fxcCTBufferArrTy->getElementCount() ==
cloneVecTy->getElementCount());
cloneElemPtrTy = context.getPointerType(cloneVecTy->getElementType(),
clone->getStorageClass());
fxcCTBufferElemPtrTy = context.getPointerType(
fxcCTBufferArrTy->getElementType(), fxcCTBuffer->getStorageClass());
} else {
llvm_unreachable("Unexpected destination type");
}
for (uint32_t i = 0; i < fxcCTBufferArrTy->getElementCount(); ++i) {
auto *ptrToFxcCTBufferElem = createAccessChain(
fxcCTBufferElemPtrTy, fxcCTBuffer,
{getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, i))}, loc);
context.addToInstructionsWithLoweredType(ptrToFxcCTBufferElem);
auto *ptrToCloneElem = createAccessChain(
cloneElemPtrTy, clone,
{getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, i))}, loc);
context.addToInstructionsWithLoweredType(ptrToCloneElem);
createCopyInstructionsFromFxcCTBufferToClone(ptrToFxcCTBufferElem,
ptrToCloneElem);
}
}
void SpirvBuilder::createCopyStructInFxcCTBufferToClone(
const StructType *fxcCTBufferStructTy, SpirvInstruction *fxcCTBuffer,
const SpirvType *cloneType, SpirvInstruction *clone, SourceLocation loc) {
if (auto *cloneStructTy = dyn_cast<StructType>(cloneType)) {
auto fxcCTBufferFields = fxcCTBufferStructTy->getFields();
auto cloneFields = cloneStructTy->getFields();
assert(fxcCTBufferFields.size() == cloneFields.size());
for (uint32_t i = 0; i < fxcCTBufferFields.size(); ++i) {
auto *fxcCTBufferElemPtrTy = context.getPointerType(
fxcCTBufferFields[i].type, fxcCTBuffer->getStorageClass());
auto *ptrToFxcCTBufferElem = createAccessChain(
fxcCTBufferElemPtrTy, fxcCTBuffer,
{getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, i))}, loc);
context.addToInstructionsWithLoweredType(ptrToFxcCTBufferElem);
auto *cloneElemPtrTy =
context.getPointerType(cloneFields[i].type, clone->getStorageClass());
auto *ptrToCloneElem = createAccessChain(
cloneElemPtrTy, clone,
{getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, i))}, loc);
context.addToInstructionsWithLoweredType(ptrToCloneElem);
createCopyInstructionsFromFxcCTBufferToClone(ptrToFxcCTBufferElem,
ptrToCloneElem);
}
} else {
llvm_unreachable("Unexpected destination type");
}
}
void SpirvBuilder::createCopyInstructionsFromFxcCTBufferToClone(
SpirvInstruction *fxcCTBuffer, SpirvInstruction *clone) {
assert(clone != nullptr && fxcCTBuffer != nullptr);
assert(clone->getResultType() != nullptr &&
fxcCTBuffer->getResultType() != nullptr);
assert(fxcCTBuffer->getLayoutRule() == SpirvLayoutRule::FxcCTBuffer &&
clone->getLayoutRule() == SpirvLayoutRule::Void);
auto *clonePtrType = dyn_cast<SpirvPointerType>(clone->getResultType());
auto *fxcCTBufferPtrType =
dyn_cast<SpirvPointerType>(fxcCTBuffer->getResultType());
assert(clonePtrType != nullptr && fxcCTBufferPtrType != nullptr);
auto *cloneType = clonePtrType->getPointeeType();
auto *fxcCTBufferType = fxcCTBufferPtrType->getPointeeType();
assert(cloneType != nullptr && fxcCTBufferType != nullptr);
auto loc = fxcCTBuffer->getSourceLocation();
if (auto *fxcCTBufferArrTy = dyn_cast<ArrayType>(fxcCTBufferType)) {
createCopyArrayInFxcCTBufferToClone(fxcCTBufferArrTy, fxcCTBuffer,
cloneType, clone, loc);
} else if (auto *fxcCTBufferStructTy =
dyn_cast<StructType>(fxcCTBufferType)) {
createCopyStructInFxcCTBufferToClone(fxcCTBufferStructTy, fxcCTBuffer,
cloneType, clone, loc);
} else if (fxcCTBufferType->getKind() == SpirvType::TK_Bool ||
fxcCTBufferType->getKind() == SpirvType::TK_Integer ||
fxcCTBufferType->getKind() == SpirvType::TK_Float ||
fxcCTBufferType->getKind() == SpirvType::TK_Vector ||
fxcCTBufferType->getKind() == SpirvType::TK_Matrix) {
auto *load = createLoad(fxcCTBufferType, fxcCTBuffer, loc);
context.addToInstructionsWithLoweredType(load);
createStore(clone, load, loc);
} else {
llvm_unreachable(
"We expect only composite types are accessed with indexes");
}
}
void SpirvBuilder::switchInsertPointToModuleInit() {
if (moduleInitInsertPoint == nullptr) {
moduleInit = createSpirvFunction(astContext.VoidTy, SourceLocation(),
"module.init", false);
moduleInitInsertPoint = new (context) SpirvBasicBlock("module.init.bb");
moduleInit->addBasicBlock(moduleInitInsertPoint);
}
assert(moduleInitInsertPoint && "null module init insert point");
insertPoint = moduleInitInsertPoint;
}
SpirvVariable *SpirvBuilder::createCloneVarForFxcCTBuffer(
QualType astType, const SpirvType *spvType, SpirvInstruction *var) {
SpirvVariable *clone = nullptr;
if (astType != QualType({})) {
clone = addModuleVar(astType, spv::StorageClass::Private, var->isPrecise(),
var->isNoninterpolated(), var->getDebugName(),
llvm::None, var->getSourceLocation());
} else {
if (const auto *ty = dyn_cast<StructType>(spvType)) {
spvType = context.getStructType(ty->getFields(), ty->getName(),
ty->isReadOnly(),
StructInterfaceType::InternalStorage);
} else if (const auto *ty = dyn_cast<HybridStructType>(spvType)) {
spvType = context.getHybridStructType(
ty->getFields(), ty->getName(), ty->isReadOnly(),
StructInterfaceType::InternalStorage);
}
clone = addModuleVar(spvType, spv::StorageClass::Private, var->isPrecise(),
var->isNoninterpolated(), var->getDebugName(),
llvm::None, var->getSourceLocation());
}
clone->setLayoutRule(SpirvLayoutRule::Void);
return clone;
}
SpirvInstruction *
SpirvBuilder::initializeCloneVarForFxcCTBuffer(SpirvInstruction *instr) {
assert(instr);
if (instr == nullptr)
return nullptr;
if (instr->getLayoutRule() != SpirvLayoutRule::FxcCTBuffer)
return nullptr;
SpirvVariable *var = dyn_cast<SpirvVariable>(instr);
if (var == nullptr)
return nullptr;
// If we already generated a clone for the given CTBuffer, return it.
auto cloneItr = fxcCTBufferToClone.find(var);
if (cloneItr != fxcCTBufferToClone.end())
return cloneItr->second;
auto astType = var->getAstResultType();
const auto *spvType = var->getResultType();
LowerTypeVisitor lowerTypeVisitor(astContext, context, spirvOptions, *this);
lowerTypeVisitor.visitInstruction(var);
context.addToInstructionsWithLoweredType(instr);
if (!lowerTypeVisitor.useSpvArrayForHlslMat1xN()) {
return nullptr;
}
auto *oldInsertPoint = insertPoint;
switchInsertPointToModuleInit();
SpirvVariable *clone = createCloneVarForFxcCTBuffer(astType, spvType, var);
lowerTypeVisitor.visitInstruction(clone);
context.addToInstructionsWithLoweredType(clone);
createCopyInstructionsFromFxcCTBufferToClone(var, clone);
fxcCTBufferToClone[var] = clone;
insertPoint = oldInsertPoint;
return clone;
}
void SpirvBuilder::addModuleProcessed(llvm::StringRef process) {
mod->addModuleProcessed(new (context) SpirvModuleProcessed({}, process));
}
SpirvExtInstImport *SpirvBuilder::getExtInstSet(llvm::StringRef extName) {
SpirvExtInstImport *set = mod->getExtInstSet(extName);
if (!set) {
// The extended instruction set is likely required for several different
// reasons. We can't pinpoint the source location for one specific function.
set = new (context) SpirvExtInstImport(/*SourceLocation*/ {}, extName);
mod->addExtInstSet(set);
}
return set;
}
SpirvExtInstImport *SpirvBuilder::getDebugInfoExtInstSet(bool vulkanDebugInfo) {
return getExtInstSet(vulkanDebugInfo ? "NonSemantic.Shader.DebugInfo.100"
: "OpenCL.DebugInfo.100");
}
SpirvVariable *SpirvBuilder::addStageIOVar(QualType type,
spv::StorageClass storageClass,
llvm::StringRef name, bool isPrecise,
bool isNointerp,
SourceLocation loc) {
// Note: We store the underlying type in the variable, *not* the pointer type.
auto *var = new (context)
SpirvVariable(type, loc, storageClass, isPrecise, isNointerp);
var->setDebugName(name);
mod->addVariable(var);
return var;
}
SpirvVariable *SpirvBuilder::addVarForHelperInvocation(QualType type,
bool isPrecise,
SourceLocation loc) {
SpirvVariable *var = addModuleVar(type, spv::StorageClass::Private, isPrecise,
false, "HelperInvocation", llvm::None, loc);
auto *oldInsertPoint = insertPoint;
switchInsertPointToModuleInit();
SpirvInstruction *isHelperInvocation = createIsHelperInvocationEXT(type, loc);
createStore(var, isHelperInvocation, loc, SourceRange());
insertPoint = oldInsertPoint;
return var;
}
SpirvVariable *SpirvBuilder::addStageBuiltinVar(QualType type,
spv::StorageClass storageClass,
spv::BuiltIn builtin,
bool isPrecise,
SourceLocation loc) {
// If the built-in variable has already been added (via a built-in alias),
// return the existing variable.
auto found = std::find_if(
builtinVars.begin(), builtinVars.end(),
[storageClass, builtin](const BuiltInVarInfo &varInfo) {
return varInfo.sc == storageClass && varInfo.builtIn == builtin;
});
if (found != builtinVars.end()) {
return found->variable;
}
// Note: We store the underlying type in the variable, *not* the pointer type.
auto *var =
new (context) SpirvVariable(type, loc, storageClass, isPrecise, false);
mod->addVariable(var);
// Decorate with the specified Builtin
auto *decor = new (context) SpirvDecoration(
loc, var, spv::Decoration::BuiltIn, {static_cast<uint32_t>(builtin)});
mod->addDecoration(decor);
// Add variable to cache.
builtinVars.emplace_back(storageClass, builtin, var);
return var;
}
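// For illustration, the decoration added above looks like (a sketch; the
// id and built-in are hypothetical):
//   OpDecorate %frag_coord BuiltIn FragCoord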
SpirvVariable *SpirvBuilder::addModuleVar(
QualType type, spv::StorageClass storageClass, bool isPrecise,
bool isNointerp, llvm::StringRef name,
llvm::Optional<SpirvInstruction *> init, SourceLocation loc) {
assert(storageClass != spv::StorageClass::Function);
// Note: We store the underlying type in the variable, *not* the pointer type.
auto *var = new (context)
SpirvVariable(type, loc, storageClass, isPrecise, isNointerp,
init.hasValue() ? init.getValue() : nullptr);
var->setDebugName(name);
mod->addVariable(var);
return var;
}
SpirvVariable *SpirvBuilder::addModuleVar(
const SpirvType *type, spv::StorageClass storageClass, bool isPrecise,
bool isNointerp, llvm::StringRef name,
llvm::Optional<SpirvInstruction *> init, SourceLocation loc) {
assert(storageClass != spv::StorageClass::Function);
// Note: We store the underlying type in the variable, *not* the pointer type.
auto *var = new (context)
SpirvVariable(type, loc, storageClass, isPrecise, isNointerp,
init.hasValue() ? init.getValue() : nullptr);
var->setResultType(type);
var->setDebugName(name);
mod->addVariable(var);
return var;
}
void SpirvBuilder::decorateLocation(SpirvInstruction *target,
uint32_t location) {
auto *decor =
new (context) SpirvDecoration(target->getSourceLocation(), target,
spv::Decoration::Location, {location});
mod->addDecoration(decor);
}
void SpirvBuilder::decorateComponent(SpirvInstruction *target,
uint32_t component) {
// Per the SPIR-V spec, the 'Component' decoration must apply to a struct
// member or a memory object declaration. Since we do not have pointer types
// in HLSL, we always emit a variable with the 'Component' decoration as part
// of a struct.
auto *decor =
new (context) SpirvDecoration(target->getSourceLocation(), target,
spv::Decoration::Component, {component});
mod->addDecoration(decor);
}
void SpirvBuilder::decorateIndex(SpirvInstruction *target, uint32_t index,
SourceLocation srcLoc) {
auto *decor = new (context)
SpirvDecoration(srcLoc, target, spv::Decoration::Index, {index});
mod->addDecoration(decor);
}
void SpirvBuilder::decorateDSetBinding(SpirvVariable *target,
uint32_t setNumber,
uint32_t bindingNumber) {
const SourceLocation srcLoc = target->getSourceLocation();
auto *dset = new (context) SpirvDecoration(
srcLoc, target, spv::Decoration::DescriptorSet, {setNumber});
mod->addDecoration(dset);
auto *binding = new (context) SpirvDecoration(
srcLoc, target, spv::Decoration::Binding, {bindingNumber});
target->setDescriptorSetNo(setNumber);
target->setBindingNo(bindingNumber);
// If the variable has the [[vk::combinedImageSampler]] attribute, we keep
// the (setNumber, bindingNumber) pair so the image and the sampler sharing
// it can be combined. The combining itself is conducted by the spirv-opt
// --convert-to-sampled-image pass.
if (context.getVkImageFeaturesForSpirvVariable(target)
.isCombinedImageSampler) {
context.registerResourceInfoForSampledImage(target->getAstResultType(),
setNumber, bindingNumber);
}
mod->addDecoration(binding);
}
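// For illustration, the pair of decorations added above looks like (a
// sketch; the id and numbers are hypothetical):
//   OpDecorate %tex DescriptorSet 0
//   OpDecorate %tex Binding 3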
void SpirvBuilder::decorateSpecId(SpirvInstruction *target, uint32_t specId,
SourceLocation srcLoc) {
auto *decor = new (context)
SpirvDecoration(srcLoc, target, spv::Decoration::SpecId, {specId});
mod->addDecoration(decor);
}
void SpirvBuilder::decorateInputAttachmentIndex(SpirvInstruction *target,
uint32_t indexNumber,
SourceLocation srcLoc) {
auto *decor = new (context) SpirvDecoration(
srcLoc, target, spv::Decoration::InputAttachmentIndex, {indexNumber});
mod->addDecoration(decor);
}
void SpirvBuilder::decorateCounterBuffer(SpirvInstruction *mainBuffer,
SpirvInstruction *counterBuffer,
SourceLocation srcLoc) {
if (spirvOptions.enableReflect) {
auto *decor = new (context) SpirvDecoration(
srcLoc, mainBuffer, spv::Decoration::HlslCounterBufferGOOGLE,
{counterBuffer});
mod->addDecoration(decor);
}
}
void SpirvBuilder::decorateHlslSemantic(SpirvInstruction *target,
llvm::StringRef semantic,
llvm::Optional<uint32_t> memberIdx) {
if (spirvOptions.enableReflect) {
auto *decor = new (context) SpirvDecoration(
target->getSourceLocation(), target,
spv::Decoration::HlslSemanticGOOGLE, semantic, memberIdx);
mod->addDecoration(decor);
}
}
void SpirvBuilder::decorateCentroid(SpirvInstruction *target,
SourceLocation srcLoc) {
auto *decor =
new (context) SpirvDecoration(srcLoc, target, spv::Decoration::Centroid);
mod->addDecoration(decor);
}
void SpirvBuilder::decorateFlat(SpirvInstruction *target,
SourceLocation srcLoc) {
auto *decor =
new (context) SpirvDecoration(srcLoc, target, spv::Decoration::Flat);
mod->addDecoration(decor);
}
void SpirvBuilder::decorateNoPerspective(SpirvInstruction *target,
SourceLocation srcLoc) {
auto *decor = new (context)
SpirvDecoration(srcLoc, target, spv::Decoration::NoPerspective);
mod->addDecoration(decor);
}
void SpirvBuilder::decorateSample(SpirvInstruction *target,
SourceLocation srcLoc) {
auto *decor =
new (context) SpirvDecoration(srcLoc, target, spv::Decoration::Sample);
mod->addDecoration(decor);
}
void SpirvBuilder::decoratePatch(SpirvInstruction *target,
SourceLocation srcLoc) {
auto *decor =
new (context) SpirvDecoration(srcLoc, target, spv::Decoration::Patch);
mod->addDecoration(decor);
}
void SpirvBuilder::decorateNoContraction(SpirvInstruction *target,
SourceLocation srcLoc) {
auto *decor = new (context)
SpirvDecoration(srcLoc, target, spv::Decoration::NoContraction);
mod->addDecoration(decor);
}
void SpirvBuilder::decoratePerPrimitiveNV(SpirvInstruction *target,
SourceLocation srcLoc) {
auto *decor = new (context)
SpirvDecoration(srcLoc, target, spv::Decoration::PerPrimitiveNV);
mod->addDecoration(decor);
}
void SpirvBuilder::decoratePerTaskNV(SpirvInstruction *target, uint32_t offset,
SourceLocation srcLoc) {
auto *decor =
new (context) SpirvDecoration(srcLoc, target, spv::Decoration::PerTaskNV);
mod->addDecoration(decor);
decor = new (context)
SpirvDecoration(srcLoc, target, spv::Decoration::Offset, {offset});
mod->addDecoration(decor);
}
void SpirvBuilder::decoratePerVertexKHR(SpirvInstruction *target,
SourceLocation srcLoc) {
auto *decor = new (context)
SpirvDecoration(srcLoc, target, spv::Decoration::PerVertexKHR);
mod->addDecoration(decor);
}
void SpirvBuilder::decorateCoherent(SpirvInstruction *target,
SourceLocation srcLoc) {
auto *decor =
new (context) SpirvDecoration(srcLoc, target, spv::Decoration::Coherent);
mod->addDecoration(decor);
}
void SpirvBuilder::decorateLinkage(SpirvInstruction *targetInst,
SpirvFunction *targetFunc,
llvm::StringRef name,
spv::LinkageType linkageType,
SourceLocation srcLoc) {
// We decorate the linkage of either a global variable or a function, but
// never both at the same time.
assert((targetInst == nullptr) != (targetFunc == nullptr));
SmallVector<uint32_t, 4> operands;
const auto &stringWords = string::encodeSPIRVString(name);
operands.insert(operands.end(), stringWords.begin(), stringWords.end());
operands.push_back(static_cast<uint32_t>(linkageType));
SpirvDecoration *decor = nullptr;
if (targetInst) {
decor = new (context) SpirvDecoration(
srcLoc, targetInst, spv::Decoration::LinkageAttributes, operands);
} else {
decor = new (context) SpirvDecoration(
srcLoc, targetFunc, spv::Decoration::LinkageAttributes, operands);
}
assert(decor != nullptr);
mod->addDecoration(decor);
}
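// For illustration, an exported function decorated here looks like (a
// sketch; the name and id are hypothetical):
//   OpDecorate %fn LinkageAttributes "myFunc" Export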
void SpirvBuilder::decorateWithLiterals(SpirvInstruction *targetInst,
unsigned decorate,
llvm::ArrayRef<unsigned> literals,
SourceLocation srcLoc) {
SpirvDecoration *decor = new (context) SpirvDecoration(
srcLoc, targetInst, static_cast<spv::Decoration>(decorate), literals);
assert(decor != nullptr);
mod->addDecoration(decor);
}
void SpirvBuilder::decorateWithIds(SpirvInstruction *targetInst,
unsigned decorate,
llvm::ArrayRef<SpirvInstruction *> ids,
SourceLocation srcLoc) {
SpirvDecoration *decor = new (context) SpirvDecoration(
srcLoc, targetInst, static_cast<spv::Decoration>(decorate), ids);
assert(decor != nullptr);
mod->addDecoration(decor);
}
void SpirvBuilder::decorateWithStrings(
SpirvInstruction *target, unsigned decorate,
llvm::ArrayRef<llvm::StringRef> strLiteral, SourceLocation srcLoc) {
auto *decor = new (context) SpirvDecoration(
srcLoc, target, static_cast<spv::Decoration>(decorate), strLiteral);
mod->addDecoration(decor);
}
SpirvConstant *SpirvBuilder::getConstantInt(QualType type, llvm::APInt value,
bool specConst) {
// We do not reuse existing constant integers. Just create a new one.
auto *intConst = new (context) SpirvConstantInteger(type, value, specConst);
mod->addConstant(intConst);
return intConst;
}
SpirvConstant *SpirvBuilder::getConstantFloat(QualType type,
llvm::APFloat value,
bool specConst) {
// We do not reuse existing constant floats. Just create a new one.
auto *floatConst = new (context) SpirvConstantFloat(type, value, specConst);
mod->addConstant(floatConst);
return floatConst;
}
SpirvConstant *SpirvBuilder::getConstantBool(bool value, bool specConst) {
// We do not care about making unique constants at this point.
auto *boolConst =
new (context) SpirvConstantBoolean(astContext.BoolTy, value, specConst);
mod->addConstant(boolConst);
return boolConst;
}
SpirvConstant *
SpirvBuilder::getConstantComposite(QualType compositeType,
llvm::ArrayRef<SpirvConstant *> constituents,
bool specConst) {
// We do not care about making unique constants at this point.
auto *compositeConst = new (context)
SpirvConstantComposite(compositeType, constituents, specConst);
mod->addConstant(compositeConst);
return compositeConst;
}
SpirvConstant *SpirvBuilder::getConstantNull(QualType type) {
// We do not care about making unique constants at this point.
auto *nullConst = new (context) SpirvConstantNull(type);
mod->addConstant(nullConst);
return nullConst;
}
SpirvUndef *SpirvBuilder::getUndef(QualType type) {
// We do not care about making unique constants at this point.
auto *undef = new (context) SpirvUndef(type);
mod->addUndef(undef);
return undef;
}
SpirvString *SpirvBuilder::createString(llvm::StringRef str) {
// Create a SpirvString instruction
auto *instr = new (context) SpirvString(/* SourceLocation */ {}, str);
instr->setRValue();
if (str.empty())
emptyString = instr;
else
stringLiterals[str.str()] = instr;
mod->addString(instr);
return instr;
}
SpirvString *SpirvBuilder::getString(llvm::StringRef str) {
// Reuse an existing instruction if possible.
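// E.g., two getString("main") calls yield the same SpirvString instruction,
// so the module contains a single OpString (illustrative).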
if (str.empty()) {
if (emptyString)
return emptyString;
} else {
auto iter = stringLiterals.find(str.str());
if (iter != stringLiterals.end())
return iter->second;
}
return createString(str);
}
const HybridPointerType *
SpirvBuilder::getPhysicalStorageBufferType(QualType pointee) {
return context.getPointerType(pointee,
spv::StorageClass::PhysicalStorageBuffer);
}
const SpirvPointerType *
SpirvBuilder::getPhysicalStorageBufferType(const SpirvType *pointee) {
return context.getPointerType(pointee,
spv::StorageClass::PhysicalStorageBuffer);
}
void SpirvBuilder::addModuleInitCallToEntryPoints() {
if (moduleInit == nullptr)
return;
for (auto *entry : mod->getEntryPoints()) {
auto *instruction = new (context)
SpirvFunctionCall(astContext.VoidTy, /* SourceLocation */ {},
moduleInit, /* params */ {});
instruction->setRValue(true);
entry->getEntryPoint()->addFirstInstruction(instruction);
}
}
void SpirvBuilder::setPerVertexInterpMode(bool b) {
mod->setPerVertexInterpMode(b);
}
bool SpirvBuilder::isPerVertexInterpMode() {
return mod->isPerVertexInterpMode();
}
void SpirvBuilder::addPerVertexStgInputFuncVarEntry(SpirvInstruction *k,
SpirvInstruction *v) {
perVertexInputVarMap[k] = v;
}
SpirvInstruction *SpirvBuilder::getPerVertexStgInput(SpirvInstruction *k) {
return perVertexInputVarMap.lookup(k);
}
void SpirvBuilder::endModuleInitFunction() {
if (moduleInitInsertPoint == nullptr ||
moduleInitInsertPoint->hasTerminator()) {
return;
}
auto *oldInsertPoint = insertPoint;
switchInsertPointToModuleInit();
createReturn(/* SourceLocation */ {});
insertPoint = oldInsertPoint;
mod->addFunctionToListOfSortedModuleFunctions(moduleInit);
}
std::vector<uint32_t> SpirvBuilder::takeModule() {
endModuleInitFunction();
addModuleInitCallToEntryPoints();
// Run necessary visitor passes first
LiteralTypeVisitor literalTypeVisitor(astContext, context, spirvOptions);
LowerTypeVisitor lowerTypeVisitor(astContext, context, spirvOptions, *this);
CapabilityVisitor capabilityVisitor(astContext, context, spirvOptions, *this,
featureManager);
RelaxedPrecisionVisitor relaxedPrecisionVisitor(context, spirvOptions);
PreciseVisitor preciseVisitor(context, spirvOptions);
NonUniformVisitor nonUniformVisitor(context, spirvOptions);
RemoveBufferBlockVisitor removeBufferBlockVisitor(
astContext, context, spirvOptions, featureManager);
EmitVisitor emitVisitor(astContext, context, spirvOptions, featureManager);
// Refine per-vertex (PerVertexKHR) inputs for pixel shaders.
if (context.isPS()) {
PervertexInputVisitor pervertexInputVisitor(*this, astContext, context,
spirvOptions);
mod->invokeVisitor(&pervertexInputVisitor);
}
mod->invokeVisitor(&literalTypeVisitor, true);
// Propagate NonUniform decorations
mod->invokeVisitor(&nonUniformVisitor);
// Lower types
mod->invokeVisitor(&lowerTypeVisitor);
// Generate debug types (if needed)
if (spirvOptions.debugInfoRich) {
DebugTypeVisitor debugTypeVisitor(astContext, context, spirvOptions, *this,
lowerTypeVisitor);
SortDebugInfoVisitor sortDebugInfoVisitor(context, spirvOptions);
mod->invokeVisitor(&debugTypeVisitor);
mod->invokeVisitor(&sortDebugInfoVisitor);
}
// Add necessary capabilities and extensions
mod->invokeVisitor(&capabilityVisitor);
// Propagate RelaxedPrecision decorations
mod->invokeVisitor(&relaxedPrecisionVisitor);
// Propagate NoContraction decorations
mod->invokeVisitor(&preciseVisitor, true);
// Remove BufferBlock decoration if necessary (this decoration is deprecated
// after SPIR-V 1.3).
mod->invokeVisitor(&removeBufferBlockVisitor);
// Emit SPIR-V
mod->invokeVisitor(&emitVisitor);
return emitVisitor.takeBinary();
}
} // end namespace spirv
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/ConstEvaluator.cpp | //===------- ConstEvaluator.cpp ----- Translate Constants -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//
//
// This file implements methods for translating AST expressions to SPIR-V
// constants.
//
//===----------------------------------------------------------------------===//
#include "ConstEvaluator.h"
namespace clang {
namespace spirv {
/// Returns true iff the given expression is a literal integer that cannot be
/// represented in a 32-bit integer type or a literal float that cannot be
/// represented in a 32-bit float type without losing info. Returns false
/// otherwise.
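/// For example (illustrative): 5000000000 needs more than 32 bits and yields
/// true; 42 and 1.5 yield false; a double literal such as 1e300 overflows
/// 'float' and also yields true.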
bool isLiteralLargerThan32Bits(const Expr *expr) {
if (const auto *intLiteral = dyn_cast<IntegerLiteral>(expr)) {
const bool isSigned = expr->getType()->isSignedIntegerType();
const llvm::APInt &value = intLiteral->getValue();
return (isSigned && !value.isSignedIntN(32)) ||
(!isSigned && !value.isIntN(32));
}
if (const auto *floatLiteral = dyn_cast<FloatingLiteral>(expr)) {
llvm::APFloat value = floatLiteral->getValue();
const auto &semantics = value.getSemantics();
// regular 'half' and 'float' can be represented in 32 bits.
if (&semantics == &llvm::APFloat::IEEEsingle ||
&semantics == &llvm::APFloat::IEEEhalf)
return false;
// See if 'double' value can be represented in 32 bits without losing info.
bool losesInfo = false;
const auto convertStatus =
value.convert(llvm::APFloat::IEEEsingle,
llvm::APFloat::rmNearestTiesToEven, &losesInfo);
if (convertStatus != llvm::APFloat::opOK &&
convertStatus != llvm::APFloat::opInexact)
return true;
}
return false;
}
SpirvConstant *ConstEvaluator::translateAPValue(const APValue &value,
const QualType targetType,
bool isSpecConstantMode) {
SpirvConstant *result = nullptr;
if (targetType->isBooleanType()) {
result = spvBuilder.getConstantBool(value.getInt().getBoolValue(),
isSpecConstantMode);
} else if (targetType->isIntegralOrEnumerationType()) {
result = translateAPInt(value.getInt(), targetType, isSpecConstantMode);
} else if (targetType->isFloatingType()) {
result = translateAPFloat(value.getFloat(), targetType, isSpecConstantMode);
} else if (hlsl::IsHLSLVecType(targetType)) {
const QualType elemType = hlsl::GetHLSLVecElementType(targetType);
const auto numElements = value.getVectorLength();
// Special case for vectors of size 1. SPIR-V doesn't support this vector
// size so we need to translate it to scalar values.
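// E.g., a 'float1' value is emitted as a plain scalar float constant.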
if (numElements == 1) {
result =
translateAPValue(value.getVectorElt(0), elemType, isSpecConstantMode);
} else {
llvm::SmallVector<SpirvConstant *, 4> elements;
for (uint32_t i = 0; i < numElements; ++i) {
elements.push_back(translateAPValue(value.getVectorElt(i), elemType,
isSpecConstantMode));
}
result = spvBuilder.getConstantComposite(targetType, elements);
}
}
if (result)
return result;
emitError("APValue of type %0 unimplemented", {}) << value.getKind();
return nullptr;
}
SpirvConstant *ConstEvaluator::translateAPInt(const llvm::APInt &intValue,
QualType targetType,
bool isSpecConstantMode) {
return spvBuilder.getConstantInt(targetType, intValue, isSpecConstantMode);
}
SpirvConstant *ConstEvaluator::translateAPFloat(llvm::APFloat floatValue,
QualType targetType,
bool isSpecConstantMode) {
return spvBuilder.getConstantFloat(targetType, floatValue,
isSpecConstantMode);
}
SpirvConstant *ConstEvaluator::tryToEvaluateAsInt32(const llvm::APInt &intValue,
bool isSigned) {
if (isSigned && intValue.isSignedIntN(32)) {
return spvBuilder.getConstantInt(astContext.IntTy, intValue);
}
if (!isSigned && intValue.isIntN(32)) {
return spvBuilder.getConstantInt(astContext.UnsignedIntTy, intValue);
}
// Couldn't evaluate as a 32-bit int without losing information.
return nullptr;
}
SpirvConstant *
ConstEvaluator::tryToEvaluateAsFloat32(const llvm::APFloat &floatValue,
bool isSpecConstantMode) {
const auto &semantics = floatValue.getSemantics();
// If the given value is already a 32-bit float, there is no need to convert.
if (&semantics == &llvm::APFloat::IEEEsingle) {
return spvBuilder.getConstantFloat(astContext.FloatTy, floatValue,
isSpecConstantMode);
}
// Try to see if this literal float can be represented in 32-bit.
// Since the convert function below may modify the fp value, we call it on a
// temporary copy.
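// For example (illustrative): the double 2.5 converts exactly and yields a
// 32-bit float constant, while 0.1 loses precision and yields nullptr.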
llvm::APFloat eval = floatValue;
bool losesInfo = false;
const auto convertStatus =
eval.convert(llvm::APFloat::IEEEsingle,
llvm::APFloat::rmNearestTiesToEven, &losesInfo);
if (convertStatus == llvm::APFloat::opOK && !losesInfo)
return spvBuilder.getConstantFloat(astContext.FloatTy,
llvm::APFloat(eval.convertToFloat()));
// Couldn't evaluate as a 32-bit float without losing information.
return nullptr;
}
SpirvConstant *ConstEvaluator::tryToEvaluateAsConst(const Expr *expr,
bool isSpecConstantMode) {
Expr::EvalResult evalResult;
if (expr->EvaluateAsRValue(evalResult, astContext) &&
!evalResult.HasSideEffects) {
return translateAPValue(evalResult.Val, expr->getType(),
isSpecConstantMode);
}
return nullptr;
}
} // namespace spirv
} // namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/SpirvEmitter.h | //===------- SpirvEmitter.h - SPIR-V Binary Code Emitter --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//
//
// This file defines a SPIR-V emitter class that takes in HLSL AST and emits
// SPIR-V binary words.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_SPIRV_SPIRVEMITTER_H
#define LLVM_CLANG_LIB_SPIRV_SPIRVEMITTER_H
#include <stack>
#include <string>
#include <utility>
#include <vector>
#include "dxc/DXIL/DxilShaderModel.h"
#include "dxc/HlslIntrinsicOp.h"
#include "spirv/unified1/GLSL.std.450.h"
#include "clang/AST/AST.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ParentMap.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/SPIRV/FeatureManager.h"
#include "clang/SPIRV/SpirvBuilder.h"
#include "clang/SPIRV/SpirvContext.h"
#include "llvm/ADT/STLExtras.h"
#include "ConstEvaluator.h"
#include "DeclResultIdMapper.h"
#include "spirv-tools/optimizer.hpp"
namespace spvtools {
namespace opt {
// A struct for a pair of descriptor set and binding.
struct DescriptorSetAndBinding {
uint32_t descriptor_set;
uint32_t binding;
};
} // namespace opt
} // namespace spvtools
namespace clang {
namespace spirv {
/// SPIR-V emitter class. It consumes the HLSL AST and emits SPIR-V words.
///
/// This class only overrides the HandleTranslationUnit() method; traversing
/// the AST is done manually instead of through ASTConsumer's harness.
class SpirvEmitter : public ASTConsumer {
public:
SpirvEmitter(CompilerInstance &ci);
void HandleTranslationUnit(ASTContext &context) override;
ASTContext &getASTContext() { return astContext; }
SpirvBuilder &getSpirvBuilder() { return spvBuilder; }
SpirvContext &getSpirvContext() { return spvContext; }
DiagnosticsEngine &getDiagnosticsEngine() { return diags; }
CompilerInstance &getCompilerInstance() { return theCompilerInstance; }
SpirvCodeGenOptions &getSpirvOptions() { return spirvOptions; }
/// \brief If DebugSource and DebugCompilationUnit for loc are already
/// created, we just return RichDebugInfo containing it. Otherwise,
/// create DebugSource and DebugCompilationUnit for loc and return it.
RichDebugInfo *getOrCreateRichDebugInfo(const SourceLocation &loc);
void doDecl(const Decl *decl);
void doStmt(const Stmt *stmt, llvm::ArrayRef<const Attr *> attrs = {});
SpirvInstruction *doExpr(const Expr *expr, SourceRange rangeOverride = {});
/// Processes the given expression and emits SPIR-V instructions. If the
/// result is a GLValue, does an additional load.
///
/// This method is useful for cases where ImplicitCastExpr (LValueToRValue) is
/// missing when using an lvalue as rvalue in the AST, e.g., DeclRefExpr will
/// not be wrapped in ImplicitCastExpr (LValueToRValue) when appearing in
/// HLSLVectorElementExpr since the generated HLSLVectorElementExpr itself can
/// be lvalue or rvalue.
SpirvInstruction *loadIfGLValue(const Expr *expr,
SourceRange rangeOverride = {});
/// Casts the given value from fromType to toType. fromType and toType should
/// both be scalar or vector types of the same size.
SpirvInstruction *castToType(SpirvInstruction *value, QualType fromType,
QualType toType, SourceLocation,
SourceRange range = {});
/// Returns true if the given VarDecl will be translated into a SPIR-V
/// variable not in the Private or Function storage class.
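/// For example (illustrative): a class-static member goes into the Private
/// storage class, while 'static groupshared float gTmp;' is still treated as
/// external so that it lands in the Workgroup storage class.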
static inline bool isExternalVar(const VarDecl *var) {
// Class static variables should be put in the Private storage class.
// groupshared variables are allowed to be declared as "static". But we
// still need to put them in the Workgroup storage class. That is, when
// seeing "static groupshared", ignore "static".
return var->hasExternalFormalLinkage()
? !var->isStaticDataMember()
: (var->getAttr<HLSLGroupSharedAttr>() != nullptr);
}
/// Create SpirvIntrinsicInstruction for arbitrary SPIR-V instructions
/// specified by [[vk::ext_instruction(..)]] or [[vk::ext_type_def(..)]]
SpirvInstruction *
createSpirvIntrInstExt(llvm::ArrayRef<const Attr *> attrs, QualType retType,
llvm::ArrayRef<SpirvInstruction *> spvArgs,
bool isInstr, SourceLocation loc);
/// \brief Negates to get the additive inverse of SV_Position.y if requested.
SpirvInstruction *invertYIfRequested(SpirvInstruction *position,
SourceLocation loc,
SourceRange range = {});
private:
void doFunctionDecl(const FunctionDecl *decl);
void doVarDecl(const VarDecl *decl);
void doRecordDecl(const RecordDecl *decl);
void doClassTemplateDecl(const ClassTemplateDecl *classTemplateDecl);
void doEnumDecl(const EnumDecl *decl);
void doHLSLBufferDecl(const HLSLBufferDecl *decl);
void doImplicitDecl(const Decl *decl);
void doBreakStmt(const BreakStmt *stmt);
void doDiscardStmt(const DiscardStmt *stmt);
inline void doDeclStmt(const DeclStmt *stmt);
void doForStmt(const ForStmt *, llvm::ArrayRef<const Attr *> attrs = {});
void doIfStmt(const IfStmt *ifStmt, llvm::ArrayRef<const Attr *> attrs = {});
void doReturnStmt(const ReturnStmt *stmt);
void doSwitchStmt(const SwitchStmt *stmt,
llvm::ArrayRef<const Attr *> attrs = {});
void doWhileStmt(const WhileStmt *, llvm::ArrayRef<const Attr *> attrs = {});
void doDoStmt(const DoStmt *, llvm::ArrayRef<const Attr *> attrs = {});
void doContinueStmt(const ContinueStmt *);
SpirvInstruction *doArraySubscriptExpr(const ArraySubscriptExpr *expr,
SourceRange rangeOverride = {});
SpirvInstruction *doBinaryOperator(const BinaryOperator *expr);
SpirvInstruction *doCallExpr(const CallExpr *callExpr,
SourceRange rangeOverride = {});
SpirvInstruction *doCastExpr(const CastExpr *expr,
SourceRange rangeOverride = {});
SpirvInstruction *doCompoundAssignOperator(const CompoundAssignOperator *);
SpirvInstruction *doConditionalOperator(const ConditionalOperator *expr);
SpirvInstruction *doConditional(const Expr *expr, const Expr *cond,
const Expr *falseExpr, const Expr *trueExpr);
SpirvInstruction *
doShortCircuitedConditionalOperator(const ConditionalOperator *expr);
SpirvInstruction *doCXXMemberCallExpr(const CXXMemberCallExpr *expr);
SpirvInstruction *doCXXOperatorCallExpr(const CXXOperatorCallExpr *expr,
SourceRange rangeOverride = {});
SpirvInstruction *doExtMatrixElementExpr(const ExtMatrixElementExpr *expr);
SpirvInstruction *doHLSLVectorElementExpr(const HLSLVectorElementExpr *expr,
SourceRange rangeOverride = {});
SpirvInstruction *doInitListExpr(const InitListExpr *expr,
SourceRange rangeOverride = {});
SpirvInstruction *doMemberExpr(const MemberExpr *expr,
SourceRange rangeOverride = {});
SpirvInstruction *doUnaryOperator(const UnaryOperator *expr);
SpirvInstruction *
doUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *expr);
/// Overload with pre-computed SpirvEvalInfo.
///
/// The given expr will not be evaluated again.
SpirvInstruction *loadIfGLValue(const Expr *expr, SpirvInstruction *info);
/// Loads the pointer of the aliased-to-variable if the given expression is a
/// DeclRefExpr referencing an alias variable. See DeclResultIdMapper for
/// more explanation regarding this.
///
/// Note: legalization specific code
SpirvInstruction *loadIfAliasVarRef(const Expr *expr,
SourceRange rangeOverride = {});
/// Loads the pointer of the aliased-to-variable and adjusts aliasVarInstr
/// accordingly if aliasVarExpr is referencing an alias variable. Returns true
/// if aliasVarInstr is changed, false otherwise.
///
/// Note: legalization specific code
bool loadIfAliasVarRef(const Expr *aliasVarExpr,
SpirvInstruction **aliasVarInstr,
SourceRange rangeOverride = {});
/// Check whether a member value has a nointerpolation qualifier in its type
/// declaration or, recursively, in any parent type's declaration.
bool isNoInterpMemberExpr(const MemberExpr *expr);
private:
/// Translates the given frontend binary operator into its SPIR-V equivalent
/// taking consideration of the operand type.
spv::Op translateOp(BinaryOperator::Opcode op, QualType type);
spv::Op translateWaveOp(hlsl::IntrinsicOp op, QualType type, SourceLocation);
/// Generates SPIR-V instructions for the given normal (non-intrinsic and
/// non-operator) standalone or member function call.
SpirvInstruction *processCall(const CallExpr *expr);
/// Generates the necessary instructions for assigning rhs to lhs. If lhsPtr
/// is not zero, it will be used as the pointer from lhs instead of evaluating
/// lhs again.
SpirvInstruction *processAssignment(const Expr *lhs, SpirvInstruction *rhs,
bool isCompoundAssignment,
SpirvInstruction *lhsPtr = nullptr,
SourceRange range = {});
/// Generates SPIR-V instructions to store rhsVal into lhsPtr. This will be
/// recursive if lhsValType is a composite type. rhsExpr will be used as a
/// reference to adjust the CodeGen if not nullptr.
void storeValue(SpirvInstruction *lhsPtr, SpirvInstruction *rhsVal,
QualType lhsValType, SourceLocation loc,
SourceRange range = {});
/// Decomposes and reconstructs the given srcVal of the given valType to meet
/// the requirements of the dstLR layout rule.
SpirvInstruction *reconstructValue(SpirvInstruction *srcVal, QualType valType,
SpirvLayoutRule dstLR, SourceLocation loc,
SourceRange range = {});
/// Generates the necessary instructions for conducting the given binary
/// operation on lhs and rhs.
///
/// computationType is the type for LHS and RHS when doing computation, while
/// resultType is the type of the whole binary operation. They can be
/// different for compound assignments like <some-int-value> *=
/// <some-float-value>, where computationType is float and resultType is int.
///
/// If lhsResultId is not nullptr, the evaluated pointer from lhs during the
/// process will be written into it. If mandateGenOpcode is not spv::Op::Max,
/// it will be used as the SPIR-V opcode instead of deducing one from the
/// Clang frontend opcode.
SpirvInstruction *
processBinaryOp(const Expr *lhs, const Expr *rhs, BinaryOperatorKind opcode,
QualType computationType, QualType resultType, SourceRange,
SourceLocation, SpirvInstruction **lhsInfo = nullptr,
spv::Op mandateGenOpcode = spv::Op::Max);
/// Generates SPIR-V instructions to initialize the given variable once.
void initOnce(QualType varType, std::string varName, SpirvVariable *,
const Expr *varInit);
/// Returns true if the given expression will be translated into a vector
/// shuffle instruction in SPIR-V.
///
/// We emit a vector shuffle instruction iff
/// * We are not selecting only one element from the vector (OpAccessChain
/// or OpCompositeExtract for such case);
/// * We are not selecting all elements in their original order (essentially
/// the original vector, no shuffling needed).
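/// For example (illustrative): given 'float4 v', 'v.yx' is a shuffle, while
/// 'v.x' (single element) and 'v.xyzw' (original order) are not.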
bool isVectorShuffle(const Expr *expr);
/// Returns true if the given expression is a short-circuited operator.
bool isShortCircuitedOp(const Expr *expr);
/// Returns true if the given statement or any of its children are a
/// short-circuited operator.
bool stmtTreeContainsShortCircuitedOp(const Stmt *stmt);
/// \brief Returns true if the given CXXOperatorCallExpr is indexing into a
/// Buffer/RWBuffer/Texture/RWTexture using operator[].
/// On success, writes the base buffer into *base if base is not nullptr, and
/// writes the index into *index if index is not nullptr.
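/// For example (illustrative): 'rwBuffer[i]' or 'tex2d[uint2(x, y)]'.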
bool isBufferTextureIndexing(const CXXOperatorCallExpr *,
const Expr **base = nullptr,
const Expr **index = nullptr);
bool isDescriptorHeap(const Expr *expr);
void getDescriptorHeapOperands(const Expr *expr, const Expr **base,
const Expr **index);
/// \brief Returns true if the given CXXOperatorCallExpr is the .mips[][]
/// access into a Texture or .sample[][] access into Texture2DMS(Array). On
/// success, writes base texture object into *base if base is not nullptr,
/// writes the index into *index if index is not nullptr, and writes the mip
/// level (lod) to *lod if lod is not nullptr.
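/// For example (illustrative): 'tex.mips[lod][pos]' or
/// 'tex2dms.sample[index][pos]'.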
bool isTextureMipsSampleIndexing(const CXXOperatorCallExpr *indexExpr,
const Expr **base = nullptr,
const Expr **index = nullptr,
const Expr **lod = nullptr);
/// Condenses a sequence of HLSLVectorElementExpr starting from the given
/// expr into one. Writes the original base into *basePtr and the condensed
/// accessor into *flattenedAccessor.
void condenseVectorElementExpr(
const HLSLVectorElementExpr *expr, const Expr **basePtr,
hlsl::VectorMemberAccessPositions *flattenedAccessor);
/// Generates necessary SPIR-V instructions to create a vector splat out of
/// the given scalarExpr. The generated vector will have the same element
/// type as scalarExpr and the given size.
SpirvInstruction *createVectorSplat(const Expr *scalarExpr, uint32_t size,
SourceRange rangeOverride = {});
/// Splits the given vector into the last element and the rest (as a new
/// vector).
void splitVecLastElement(QualType vecType, SpirvInstruction *vec,
SpirvInstruction **residual,
SpirvInstruction **lastElement, SourceLocation loc);
/// Converts a vector value into the given struct type with its element type's
/// <result-id> as elemTypeId.
///
/// Assumes the vector and the struct have matching number of elements. Panics
/// otherwise.
SpirvInstruction *convertVectorToStruct(QualType structType,
QualType elemType,
SpirvInstruction *vector,
SourceLocation loc,
SourceRange range = {});
/// Translates a floatN * float multiplication into SPIR-V instructions and
/// returns the <result-id>. Returns 0 if the given binary operation is not
/// floatN * float.
SpirvInstruction *tryToGenFloatVectorScale(const BinaryOperator *expr);
/// Translates a floatMxN * float multiplication into SPIR-V instructions and
/// returns the <result-id>. Returns 0 if the given binary operation is not
/// floatMxN * float.
SpirvInstruction *tryToGenFloatMatrixScale(const BinaryOperator *expr);
/// Tries to emit instructions for assigning to the given vector element
/// accessing expression. Returns 0 if the trial fails and no instructions
/// are generated.
SpirvInstruction *tryToAssignToVectorElements(const Expr *lhs,
SpirvInstruction *rhs,
SourceRange range = {});
/// Tries to emit instructions for assigning to the given matrix element
/// accessing expression. Returns 0 if the trial fails and no instructions
/// are generated.
SpirvInstruction *tryToAssignToMatrixElements(const Expr *lhs,
SpirvInstruction *rhs,
SourceRange range = {});
/// Tries to emit instructions for assigning to the given RWBuffer/RWTexture
/// object. Returns 0 if the trial fails and no instructions are generated.
SpirvInstruction *tryToAssignToRWBufferRWTexture(const Expr *lhs,
SpirvInstruction *rhs,
SourceRange range = {});
/// Tries to emit instructions for assigning to the given mesh out attribute
/// or indices object. Returns 0 if the trial fails and no instructions are
/// generated.
SpirvInstruction *
tryToAssignToMSOutAttrsOrIndices(const Expr *lhs, SpirvInstruction *rhs,
SpirvInstruction *vecComponent = nullptr,
bool noWriteBack = false);
/// Emit instructions for assigning to the given mesh out attribute.
void assignToMSOutAttribute(
const DeclaratorDecl *decl, SpirvInstruction *value,
const llvm::SmallVector<SpirvInstruction *, 4> &indices);
/// Emit instructions for assigning to the given mesh out indices object.
void
assignToMSOutIndices(const DeclaratorDecl *decl, SpirvInstruction *value,
const llvm::SmallVector<SpirvInstruction *, 4> &indices);
/// Processes each vector within the given matrix by calling actOnEachVector.
/// matrixVal should be the loaded value of the matrix. actOnEachVector takes
/// the row index, the input and output row vector types, and the current row
/// vector value; it returns the processed vector.
SpirvInstruction *processEachVectorInMatrix(
const Expr *matrix, SpirvInstruction *matrixVal,
llvm::function_ref<SpirvInstruction *(uint32_t, QualType, QualType,
SpirvInstruction *)>
actOnEachVector,
SourceLocation loc = {}, SourceRange range = {});
SpirvInstruction *processEachVectorInMatrix(
const Expr *matrix, QualType outputType, SpirvInstruction *matrixVal,
llvm::function_ref<SpirvInstruction *(uint32_t, QualType, QualType,
SpirvInstruction *)>
actOnEachVector,
SourceLocation loc = {}, SourceRange range = {});
/// Translates the given varDecl into a spec constant.
void createSpecConstant(const VarDecl *varDecl);
/// Generates the necessary instructions for conducting the given binary
/// operation on lhs and rhs.
///
/// This method expects that both lhs and rhs are SPIR-V acceptable matrices.
SpirvInstruction *processMatrixBinaryOp(const Expr *lhs, const Expr *rhs,
const BinaryOperatorKind opcode,
SourceRange, SourceLocation);
/// Creates a temporary local variable in the current function of the given
/// varType and varName. Initializes the variable with the given initValue.
/// Returns the instruction pointer for the variable.
SpirvVariable *createTemporaryVar(QualType varType, llvm::StringRef varName,
SpirvInstruction *initValue,
SourceLocation loc);
/// Collects all indices from consecutive MemberExprs, ArraySubscriptExprs and
/// CXXOperatorCallExprs. Also special handles all mesh shader out attributes
/// to return the entire expression in order for caller to extract the member
/// expression.
const Expr *
collectArrayStructIndices(const Expr *expr, bool rawIndex,
llvm::SmallVectorImpl<uint32_t> *rawIndices,
llvm::SmallVectorImpl<SpirvInstruction *> *indices,
bool *isMSOutAttribute = nullptr,
bool *isNointerp = nullptr);
/// For L-values, creates an access chain to index into the given SPIR-V
/// evaluation result and returns the new SPIR-V evaluation result.
/// For R-values, stores it in a variable, then create the access chain and
/// return the evaluation result.
SpirvInstruction *derefOrCreatePointerToValue(
QualType baseType, SpirvInstruction *base, QualType elemType,
const llvm::SmallVector<SpirvInstruction *, 4> &indices,
SourceLocation loc, SourceRange range = {});
SpirvVariable *turnIntoLValue(QualType type, SpirvInstruction *source,
SourceLocation loc);
private:
/// Validates that vk::* attributes are used correctly and returns false if
/// errors are found.
bool validateVKAttributes(const NamedDecl *decl);
/// Records any Spir-V capabilities and extensions for the given varDecl so
/// they will be added to the SPIR-V module.
void registerCapabilitiesAndExtensionsForVarDecl(const VarDecl *varDecl);
private:
/// Converts the given value from the bitwidth of 'fromType' to the bitwidth
/// of 'toType'. If the two have the same bitwidth, returns the value itself.
/// If resultType is not nullptr, the resulting value's type will be written
/// to resultType. Panics if the given types are not scalar or vector of
/// float/integer type.
SpirvInstruction *convertBitwidth(SpirvInstruction *value, SourceLocation loc,
QualType fromType, QualType toType,
QualType *resultType = nullptr,
SourceRange range = {});
/// Processes the given expr, casts the result into the given bool (vector)
/// type and returns the <result-id> of the casted value.
SpirvInstruction *castToBool(SpirvInstruction *value, QualType fromType,
QualType toType, SourceLocation loc,
SourceRange range = {});
/// Processes the given expr, casts the result into the given integer (vector)
/// type and returns the <result-id> of the casted value.
SpirvInstruction *castToInt(SpirvInstruction *value, QualType fromType,
QualType toType, SourceLocation,
SourceRange srcRange = {});
/// Processes the given expr, casts the result into the given float (vector)
/// type and returns the <result-id> of the casted value.
SpirvInstruction *castToFloat(SpirvInstruction *value, QualType fromType,
QualType toType, SourceLocation,
SourceRange range = {});
private:
/// Processes HLSL intrinsic functions.
SpirvInstruction *processIntrinsicCallExpr(const CallExpr *);
/// Processes the 'clip' intrinsic function. Discards the current pixel if the
/// specified value is less than zero.
SpirvInstruction *processIntrinsicClip(const CallExpr *);
/// Processes the 'dst' intrinsic function.
SpirvInstruction *processIntrinsicDst(const CallExpr *);
/// Processes the 'clamp' intrinsic function.
SpirvInstruction *processIntrinsicClamp(const CallExpr *);
/// Processes the 'frexp' intrinsic function.
SpirvInstruction *processIntrinsicFrexp(const CallExpr *);
/// Processes the 'ldexp' intrinsic function.
SpirvInstruction *processIntrinsicLdexp(const CallExpr *);
/// Processes the 'D3DCOLORtoUBYTE4' intrinsic function.
SpirvInstruction *processD3DCOLORtoUBYTE4(const CallExpr *);
/// Processes the 'lit' intrinsic function.
SpirvInstruction *processIntrinsicLit(const CallExpr *);
/// Processes the 'GroupMemoryBarrier', 'GroupMemoryBarrierWithGroupSync',
/// 'DeviceMemoryBarrier', 'DeviceMemoryBarrierWithGroupSync',
/// 'AllMemoryBarrier', and 'AllMemoryBarrierWithGroupSync' intrinsic
/// functions.
SpirvInstruction *processIntrinsicMemoryBarrier(const CallExpr *,
bool isDevice, bool groupSync,
bool isAllBarrier);
/// Processes the 'mad' intrinsic function.
SpirvInstruction *processIntrinsicMad(const CallExpr *);
/// Processes the 'modf' intrinsic function.
SpirvInstruction *processIntrinsicModf(const CallExpr *);
/// Processes the 'msad4' intrinsic function.
SpirvInstruction *processIntrinsicMsad4(const CallExpr *);
/// Processes the 'mul' intrinsic function.
SpirvInstruction *processIntrinsicMul(const CallExpr *);
/// Processes the 'printf' intrinsic function.
SpirvInstruction *processIntrinsicPrintf(const CallExpr *);
/// Transposes a non-floating point matrix and returns the result-id of the
/// transpose.
SpirvInstruction *processNonFpMatrixTranspose(QualType matType,
SpirvInstruction *matrix,
SourceLocation loc,
SourceRange range = {});
/// Processes the dot product of two non-floating point vectors. The SPIR-V
/// OpDot only accepts float vectors. Assumes that the two vectors are of the
/// same size and have the same element type (elemType).
SpirvInstruction *processNonFpDot(SpirvInstruction *vec1Id,
SpirvInstruction *vec2Id, uint32_t vecSize,
QualType elemType, SourceLocation loc,
SourceRange range = {});
/// Processes the multiplication of a *non-floating point* matrix by a scalar.
/// Assumes that the matrix element type and the scalar type are the same.
SpirvInstruction *
processNonFpScalarTimesMatrix(QualType scalarType, SpirvInstruction *scalar,
QualType matType, SpirvInstruction *matrix,
SourceLocation loc, SourceRange range = {});
/// Processes the multiplication of a *non-floating point* matrix by a vector.
/// Assumes the matrix element type and the vector element type are the same.
/// Notice that the vector in this case is a "row vector" and will be
/// multiplied by the matrix columns (dot product). As a result, the given
/// matrix must be transposed in order to easily get each column. If
/// 'matrixTranspose' is non-null, it will be used as the transposed matrix
/// result-id; otherwise the function will perform the transpose itself.
SpirvInstruction *processNonFpVectorTimesMatrix(
QualType vecType, SpirvInstruction *vector, QualType matType,
SpirvInstruction *matrix, SourceLocation loc,
SpirvInstruction *matrixTranspose = nullptr, SourceRange range = {});
/// Processes the multiplication of a vector by a *non-floating point* matrix.
/// Assumes the matrix element type and the vector element type are the same.
SpirvInstruction *
processNonFpMatrixTimesVector(QualType matType, SpirvInstruction *matrix,
QualType vecType, SpirvInstruction *vector,
SourceLocation loc, SourceRange range = {});
/// Processes a non-floating point matrix multiplication. Assumes that the
/// number of columns in lhs matrix is the same as number of rows in the rhs
/// matrix. Also assumes that the two matrices have the same element type.
SpirvInstruction *
processNonFpMatrixTimesMatrix(QualType lhsType, SpirvInstruction *lhs,
QualType rhsType, SpirvInstruction *rhs,
SourceLocation loc, SourceRange range = {});
/// Processes the 'dot' intrinsic function.
SpirvInstruction *processIntrinsicDot(const CallExpr *);
/// Processes the 'log10' intrinsic function.
SpirvInstruction *processIntrinsicLog10(const CallExpr *);
/// Processes the 'all' and 'any' intrinsic functions.
SpirvInstruction *processIntrinsicAllOrAny(const CallExpr *, spv::Op);
/// Processes the 'asfloat', 'asint', and 'asuint' intrinsic functions.
SpirvInstruction *processIntrinsicAsType(const CallExpr *);
/// Processes the 'saturate' intrinsic function.
SpirvInstruction *processIntrinsicSaturate(const CallExpr *);
/// Processes the 'sincos' intrinsic function.
SpirvInstruction *processIntrinsicSinCos(const CallExpr *);
/// Processes the 'isFinite' intrinsic function.
SpirvInstruction *processIntrinsicIsFinite(const CallExpr *);
/// Processes the 'rcp' intrinsic function.
SpirvInstruction *processIntrinsicRcp(const CallExpr *);
/// Processes the 'ReadClock' intrinsic function.
SpirvInstruction *processIntrinsicReadClock(const CallExpr *);
/// Processes the 'sign' intrinsic function for float types.
/// The FSign instruction in the GLSL instruction set returns a floating point
/// result. The HLSL sign function, however, returns an integer. An extra
/// casting from float to integer is therefore performed by this method.
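/// For example (illustrative): sign(-3.5f) computes FSign(-3.5f) == -1.0f,
/// which is then cast to the integer -1.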
SpirvInstruction *processIntrinsicFloatSign(const CallExpr *);
/// Processes the 'f16to32' intrinsic function.
SpirvInstruction *processIntrinsicF16ToF32(const CallExpr *);
/// Processes the 'f32tof16' intrinsic function.
SpirvInstruction *processIntrinsicF32ToF16(const CallExpr *);
/// Processes the given intrinsic function call using the given GLSL
/// extended instruction. If the given instruction cannot operate on matrices,
/// it performs the instruction on each row of the matrix and uses composite
/// construction to generate the resulting matrix.
SpirvInstruction *processIntrinsicUsingGLSLInst(const CallExpr *,
GLSLstd450 instr,
bool canOperateOnMatrix,
SourceLocation,
SourceRange range = {});
/// Processes the given intrinsic function call using the given SPIR-V
/// instruction. If the given instruction cannot operate on matrices, it
/// performs the instruction on each row of the matrix and uses composite
/// construction to generate the resulting matrix.
SpirvInstruction *processIntrinsicUsingSpirvInst(const CallExpr *, spv::Op,
bool canOperateOnMatrix);
/// Processes the given intrinsic member call.
SpirvInstruction *processIntrinsicMemberCall(const CXXMemberCallExpr *expr,
hlsl::IntrinsicOp opcode);
/// Processes Interlocked* intrinsic functions.
SpirvInstruction *processIntrinsicInterlockedMethod(const CallExpr *,
hlsl::IntrinsicOp);
/// Processes SM6.0 wave query intrinsic calls.
SpirvInstruction *processWaveQuery(const CallExpr *, spv::Op opcode);
/// Processes SM6.6 IsHelperLane intrinsic calls.
SpirvInstruction *processIsHelperLane(const CallExpr *, SourceLocation loc,
SourceRange range);
/// Processes SM6.0 wave vote intrinsic calls.
SpirvInstruction *processWaveVote(const CallExpr *, spv::Op opcode);
/// Processes SM6.0 wave active/prefix count bits.
SpirvInstruction *processWaveCountBits(const CallExpr *,
spv::GroupOperation groupOp);
/// Processes SM6.0 wave reduction or scan/prefix and SM6.5 wave multiprefix
/// intrinsic calls.
SpirvInstruction *processWaveReductionOrPrefix(const CallExpr *, spv::Op op,
spv::GroupOperation groupOp);
/// Processes SM6.0 wave broadcast intrinsic calls.
SpirvInstruction *processWaveBroadcast(const CallExpr *);
/// Processes SM6.0 quad-wide shuffle.
SpirvInstruction *processWaveQuadWideShuffle(const CallExpr *,
hlsl::IntrinsicOp op);
/// Generates the Spir-V instructions needed to implement the given call to
/// WaveActiveAllEqual. Returns a pointer to the instruction that produces the
/// final result.
SpirvInstruction *processWaveActiveAllEqual(const CallExpr *);
/// Generates the Spir-V instructions needed to implement WaveActiveAllEqual
/// with the scalar input `arg`. Returns a pointer to the instruction that
/// produces the final result. srcLoc should be the source location of the
/// original call.
SpirvInstruction *
processWaveActiveAllEqualScalar(SpirvInstruction *arg,
clang::SourceLocation srcLoc);
/// Generates the Spir-V instructions needed to implement WaveActiveAllEqual
/// with the vector input `arg`. Returns a pointer to the instruction that
/// produces the final result. srcLoc should be the source location of the
/// original call.
SpirvInstruction *
processWaveActiveAllEqualVector(SpirvInstruction *arg,
clang::SourceLocation srcLoc);
/// Generates the Spir-V instructions needed to implement WaveActiveAllEqual
/// with the matrix input `arg`. Returns a pointer to the instruction that
/// produces the final result. srcLoc should be the source location of the
/// original call.
SpirvInstruction *
processWaveActiveAllEqualMatrix(SpirvInstruction *arg, QualType,
clang::SourceLocation srcLoc);
/// Processes SM6.5 WaveMatch function.
SpirvInstruction *processWaveMatch(const CallExpr *);
/// Processes the NonUniformResourceIndex intrinsic function.
SpirvInstruction *processIntrinsicNonUniformResourceIndex(const CallExpr *);
/// Processes the SM 6.4 dot4add_{i|u}8packed intrinsic functions.
SpirvInstruction *processIntrinsicDP4a(const CallExpr *callExpr,
hlsl::IntrinsicOp op);
/// Processes the SM 6.4 dot2add intrinsic function.
SpirvInstruction *processIntrinsicDP2a(const CallExpr *callExpr);
/// Processes the SM 6.6 pack_{s|u}8 and pack_clamp_{s|u}8 intrinsic
/// functions.
SpirvInstruction *processIntrinsic8BitPack(const CallExpr *,
hlsl::IntrinsicOp);
/// Processes the SM 6.6 unpack_{s|u}8{s|u}{16|32} intrinsic functions.
SpirvInstruction *processIntrinsic8BitUnpack(const CallExpr *,
hlsl::IntrinsicOp);
/// Process builtins specific to raytracing.
SpirvInstruction *processRayBuiltins(const CallExpr *, hlsl::IntrinsicOp op);
/// Process raytracing intrinsics.
SpirvInstruction *processReportHit(const CallExpr *);
void processCallShader(const CallExpr *callExpr);
void processTraceRay(const CallExpr *callExpr);
/// Process amplification shader intrinsics.
void processDispatchMesh(const CallExpr *callExpr);
/// Process mesh shader intrinsics.
void processMeshOutputCounts(const CallExpr *callExpr);
/// Process GetAttributeAtVertex for barycentrics.
SpirvInstruction *processGetAttributeAtVertex(const CallExpr *expr);
/// Process ray query traceinline intrinsics.
SpirvInstruction *processTraceRayInline(const CXXMemberCallExpr *expr);
/// Process ray query intrinsics
SpirvInstruction *processRayQueryIntrinsics(const CXXMemberCallExpr *expr,
hlsl::IntrinsicOp opcode);
/// Process spirv intrinsic instruction
SpirvInstruction *processSpvIntrinsicCallExpr(const CallExpr *expr);
/// Process spirv intrinsic type definition
SpirvInstruction *processSpvIntrinsicTypeDef(const CallExpr *expr);
/// Process `T vk::RawBufferLoad<T>(in uint64_t address
/// [, in uint alignment])` that loads data from a given device address.
SpirvInstruction *processRawBufferLoad(const CallExpr *callExpr);
SpirvInstruction *loadDataFromRawAddress(SpirvInstruction *addressInUInt64,
QualType bufferType,
uint32_t alignment,
SourceLocation loc);
/// Process `void vk::RawBufferStore<T>(in uint64_t address, in T value
/// [, in uint alignment])` that stores data to a given device address.
SpirvInstruction *processRawBufferStore(const CallExpr *callExpr);
SpirvInstruction *storeDataToRawAddress(SpirvInstruction *addressInUInt64,
SpirvInstruction *value,
QualType bufferType,
uint32_t alignment,
SourceLocation loc,
SourceRange range);
/// Returns the value of the alignment argument for `vk::RawBufferLoad()` and
/// `vk::RawBufferStore()`.
uint32_t getRawBufferAlignment(const Expr *expr);
/// Process vk::ext_execution_mode intrinsic
SpirvInstruction *processIntrinsicExecutionMode(const CallExpr *expr,
bool useIdParams);
/// Processes the 'firstbit{high|low}' intrinsic functions.
SpirvInstruction *processIntrinsicFirstbit(const CallExpr *,
GLSLstd450 glslOpcode);
private:
/// Returns the <result-id> for constant value 0 of the given type.
SpirvConstant *getValueZero(QualType type);
/// Returns the <result-id> for a constant zero vector of the given size and
/// element type.
SpirvConstant *getVecValueZero(QualType elemType, uint32_t size);
/// Returns the <result-id> for constant value 1 of the given type.
SpirvConstant *getValueOne(QualType type);
/// Returns the <result-id> for a constant one vector of the given size and
/// element type.
SpirvConstant *getVecValueOne(QualType elemType, uint32_t size);
/// Returns the <result-id> for a constant one (vector) having the same
/// element type as the given matrix type.
///
/// If a 1x1 matrix is given, the returned value one will be a scalar;
/// if a Mx1 or 1xN matrix is given, the returned value one will be a
/// vector of size M or N; if a MxN matrix is given, the returned value
/// one will be a vector of size N.
SpirvConstant *getMatElemValueOne(QualType type);
/// Returns a SPIR-V constant equal to the bitwidth of the given type minus
/// one. The returned constant has the same component count and bitwidth as
/// the given type.
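/// For example (illustrative): for 'uint' this is the scalar 31; for 'uint2'
/// it is a vector of two 31s.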
SpirvConstant *getMaskForBitwidthValue(QualType type);
private:
/// \brief Performs a FlatConversion implicit cast. Fills an instance of the
/// given type with initializer <result-id>.
SpirvInstruction *processFlatConversion(const QualType type,
SpirvInstruction *initId,
SourceLocation,
SourceRange range = {});
private:
/// Translates the given HLSL loop attribute into SPIR-V loop control mask.
/// Emits an error if the given attribute is not a loop attribute.
spv::LoopControlMask translateLoopAttribute(const Stmt *, const Attr &);
static hlsl::ShaderModel::Kind getShaderModelKind(StringRef stageName);
static spv::ExecutionModel getSpirvShaderStage(hlsl::ShaderModel::Kind smk,
bool);
/// \brief Handle inline SPIR-V attributes for the entry function.
void processInlineSpirvAttributes(const FunctionDecl *entryFunction);
/// \brief Adds necessary execution modes for the hull/domain shaders based on
/// the HLSL attributes of the entry point function.
/// In the case of hull shaders, also writes the number of output control
/// points to *numOutputControlPoints. Returns true on success, and false on
/// failure.
bool processTessellationShaderAttributes(const FunctionDecl *entryFunction,
uint32_t *numOutputControlPoints);
/// \brief Adds necessary execution modes for the geometry shader based on the
/// HLSL attributes of the entry point function. Also writes the array size of
/// the input, which depends on the primitive type, to *arraySize.
bool processGeometryShaderAttributes(const FunctionDecl *entryFunction,
uint32_t *arraySize);
/// \brief Adds necessary execution modes for the pixel shader based on the
/// HLSL attributes of the entry point function.
void processPixelShaderAttributes(const FunctionDecl *decl);
/// \brief Adds necessary execution modes for the compute shader based on the
/// HLSL attributes of the entry point function.
void processComputeShaderAttributes(const FunctionDecl *entryFunction);
/// \brief Adds necessary execution modes for the mesh/amplification shader
/// based on the HLSL attributes of the entry point function.
bool
processMeshOrAmplificationShaderAttributes(const FunctionDecl *decl,
uint32_t *outVerticesArraySize);
/// \brief Emits a wrapper function for the entry function and returns true
/// on success.
///
/// The wrapper function loads the values of all stage input variables and
/// creates composites as expected by the source code entry function. It then
/// calls the source code entry point and writes out stage output variables
/// by extracting sub-values from the return value. In this way, we can handle
/// the source code entry point as a normal function.
///
/// The wrapper function is also responsible for initializing global static
/// variables for some cases.
bool emitEntryFunctionWrapper(const FunctionDecl *entryFunction,
SpirvFunction *entryFuncId);
/// \brief Emits a wrapper function for the entry functions for raytracing
/// stages and returns true on success.
///
/// Wrapper is specific to raytracing stages since for specific stages we
/// create specific module scoped stage variables and perform copies to them.
/// The wrapper function is also responsible for initializing global static
/// variables for some cases.
bool emitEntryFunctionWrapperForRayTracing(const FunctionDecl *entryFunction,
SpirvFunction *entryFuncId);
/// \brief Performs the following operations for the Hull shader:
/// * Creates an output variable which is an Array containing results for all
/// control points.
///
/// * If the Patch Constant Function (PCF) takes the Hull main entry function
/// results (OutputPatch), it creates a temporary function-scope variable that
/// is then passed to the PCF.
///
/// * Adds a control barrier (OpControlBarrier) to ensure all invocations are
/// done before PCF is called.
///
/// * Prepares the necessary parameters to pass to the PCF (Can be one or more
/// of InputPatch, OutputPatch, PrimitiveId).
///
/// * The execution thread with ControlPointId (invocationID) of 0 calls the
/// PCF. e.g. if(id == 0) pcf();
///
/// * Gathers the results of the PCF and assigns them to stage output
/// variables.
///
/// The method panics if it is called for any shader kind other than Hull
/// shaders.
bool processHSEntryPointOutputAndPCF(
const FunctionDecl *hullMainFuncDecl, QualType retType,
SpirvInstruction *retVal, uint32_t numOutputControlPoints,
SpirvInstruction *outputControlPointId, SpirvInstruction *primitiveId,
SpirvInstruction *viewId, SpirvInstruction *hullMainInputPatch);
private:
/// \brief Returns true iff *all* the case values in the given switch
/// statement are integer literals. In such cases OpSwitch can be used to
/// represent the switch statement.
/// We only care about the case values to be compared with the selector. They
/// may appear in a top-level CaseStmt or be nested in a CompoundStmt;
/// fall-through cases result in the second situation.
bool allSwitchCasesAreIntegerLiterals(const Stmt *root);
/// \brief Recursively discovers all CaseStmt and DefaultStmt under the
/// sub-tree of the given root. Recursively goes down the tree iff it finds a
/// CaseStmt, DefaultStmt, or CompoundStmt. It does not recurse on other
/// statement types. For each discovered case, a basic block is created and
/// registered within the module, and added as a successor to the current
/// active basic block.
///
/// Writes a vector of (integer, basic block label) pairs for all cases to the
/// given 'targets' argument. If a DefaultStmt is found, it also returns the
/// label for the default basic block through the defaultBB parameter. This
/// method panics if it finds a case value that is not an integer literal.
void discoverAllCaseStmtInSwitchStmt(
const Stmt *root, SpirvBasicBlock **defaultBB,
std::vector<std::pair<llvm::APInt, SpirvBasicBlock *>> *targets);
/// Flattens structured AST of the given switch statement into a vector of AST
/// nodes and stores into flatSwitch.
///
/// The AST for a switch statement may look arbitrarily different based on
/// several factors such as placement of cases, placement of breaks, placement
/// of braces, and fallthrough cases.
///
/// A CaseStmt for instance is the child node of a CompoundStmt for
/// regular cases and it is the child node of another CaseStmt for fallthrough
/// cases.
///
/// A BreakStmt for instance could be the child node of a CompoundStmt
/// for regular cases, or the child node of a CaseStmt for some fallthrough
/// cases.
///
/// This method flattens the AST representation of a switch statement to make
/// it easier to process for translation.
/// For example:
///
/// switch(a) {
/// case 1:
/// <Stmt1>
/// case 2:
/// <Stmt2>
/// break;
/// case 3:
/// case 4:
/// <Stmt4>
/// break;
/// default:
/// <Stmt5>
/// }
///
/// is flattened to the following vector:
///
/// +-----+-----+-----+-----+-----+-----+-----+-----+-----+-------+-----+
/// |Case1|Stmt1|Case2|Stmt2|Break|Case3|Case4|Stmt4|Break|Default|Stmt5|
/// +-----+-----+-----+-----+-----+-----+-----+-----+-----+-------+-----+
///
void flattenSwitchStmtAST(const Stmt *root,
std::vector<const Stmt *> *flatSwitch);
void processCaseStmtOrDefaultStmt(const Stmt *stmt);
void processSwitchStmtUsingSpirvOpSwitch(const SwitchStmt *switchStmt);
/// Translates a switch statement into SPIR-V conditional branches.
///
/// This is done by constructing AST if statements out of the cases using the
/// following pattern:
/// if { ... } else if { ... } else if { ... } else { ... }
/// And then calling the SPIR-V codegen methods for these if statements.
///
/// Each case comparison is turned into an if statement, and the "then" body
/// of the if statement will be the body of the case.
/// If a default statements exists, it becomes the body of the "else"
/// statement.
void processSwitchStmtUsingIfStmts(const SwitchStmt *switchStmt);
/// Handles the offset argument in the given method call at the given argument
/// index. Panics if the argument at the given index does not exist. Writes
/// the <result-id> to either *constOffset or *varOffset, depending on the
/// constantness of the offset.
void handleOffsetInMethodCall(const CXXMemberCallExpr *expr, uint32_t index,
SpirvInstruction **constOffset,
SpirvInstruction **varOffset);
/// \brief Processes .Load() method call for Buffer/RWBuffer and texture
/// objects.
SpirvInstruction *processBufferTextureLoad(const CXXMemberCallExpr *);
/// \brief Loads one element from the given Buffer/RWBuffer/Texture object at
/// the given location. The type of the loaded element matches the type in the
/// declaration for the Buffer/Texture object.
/// If residencyCode is not null, the SPIR-V instruction for storing the
/// resulting residency code will also be emitted.
SpirvInstruction *
processBufferTextureLoad(const Expr *object, SpirvInstruction *location,
SpirvInstruction *constOffset, SpirvInstruction *lod,
SpirvInstruction *residencyCode, SourceLocation loc,
SourceRange range = {});
/// \brief Processes .Sample() and .Gather() method calls for texture objects.
SpirvInstruction *processTextureSampleGather(const CXXMemberCallExpr *expr,
bool isSample);
/// \brief Processes .SampleBias() and .SampleLevel() method calls for texture
/// objects.
SpirvInstruction *processTextureSampleBiasLevel(const CXXMemberCallExpr *expr,
bool isBias);
/// \brief Processes .SampleGrad() method call for texture objects.
SpirvInstruction *processTextureSampleGrad(const CXXMemberCallExpr *expr);
/// \brief Processes .SampleCmp() method call for texture objects.
SpirvInstruction *processTextureSampleCmp(const CXXMemberCallExpr *expr);
/// \brief Processes .SampleCmpLevelZero() method call for texture objects.
SpirvInstruction *
processTextureSampleCmpLevelZero(const CXXMemberCallExpr *expr);
/// \brief Processes .SampleCmpLevel() method call for texture objects.
SpirvInstruction *processTextureSampleCmpLevel(const CXXMemberCallExpr *expr);
/// \brief Handles .Gather{|Cmp}{Red|Green|Blue|Alpha}() calls on texture
/// types.
SpirvInstruction *
processTextureGatherRGBACmpRGBA(const CXXMemberCallExpr *expr, bool isCmp,
uint32_t component);
/// \brief Handles .GatherCmp() calls on texture types.
SpirvInstruction *processTextureGatherCmp(const CXXMemberCallExpr *expr);
/// \brief Returns the calculated level-of-detail (a single float value) for
/// the given texture. Handles intrinsic HLSL CalculateLevelOfDetail or
/// CalculateLevelOfDetailUnclamped function depending on the given unclamped
/// parameter.
SpirvInstruction *processTextureLevelOfDetail(const CXXMemberCallExpr *expr,
bool unclamped);
/// \brief Processes the .GetDimensions() call on supported objects.
SpirvInstruction *processGetDimensions(const CXXMemberCallExpr *);
/// \brief Queries the given (RW)Buffer/(RW)Texture image in the given expr
/// for the requested information. Based on the dimension of the image, the
/// following info can be queried: width, height, depth, number of mipmap
/// levels.
SpirvInstruction *
processBufferTextureGetDimensions(const CXXMemberCallExpr *);
/// \brief Generates an OpAccessChain instruction for the given
/// (RW)StructuredBuffer.Load() method call.
SpirvInstruction *processStructuredBufferLoad(const CXXMemberCallExpr *expr);
/// \brief Increments or decrements the counter for RW/Append/Consume
/// structured buffer. If loadObject is true, the object upon which the call
/// is made will be evaluated and translated into SPIR-V.
SpirvInstruction *incDecRWACSBufferCounter(const CXXMemberCallExpr *call,
bool isInc,
bool loadObject = true);
/// Assigns the counter variable associated with srcExpr to the one associated
/// with dstDecl if the dstDecl is an internal RW/Append/Consume structured
/// buffer. Returns false if there is no associated counter variable for
/// srcExpr or dstDecl.
///
/// Note: legalization specific code
bool tryToAssignCounterVar(const DeclaratorDecl *dstDecl,
const Expr *srcExpr);
bool tryToAssignCounterVar(const Expr *dstExpr, const Expr *srcExpr);
/// Returns an instruction that points to the alias counter variable with the
/// entity represented by expr.
///
/// This method only handles final alias structured buffers, which means
/// AssocCounter#1 and AssocCounter#2.
SpirvInstruction *
getFinalACSBufferCounterAliasAddressInstruction(const Expr *expr);
/// Returns an instruction that points to the counter variable with the entity
/// represented by expr.
///
/// This method only handles final alias structured buffers, which means
/// AssocCounter#1 and AssocCounter#2.
SpirvInstruction *getFinalACSBufferCounterInstruction(const Expr *expr);
/// Returns the counter variable's information associated with the entity
/// represented by the given decl.
///
/// This method only handles final alias structured buffers, which means
/// AssocCounter#1 and AssocCounter#2.
const CounterIdAliasPair *getFinalACSBufferCounter(const Expr *expr);
/// This method handles AssocCounter#3 and AssocCounter#4.
const CounterVarFields *
getIntermediateACSBufferCounter(const Expr *expr,
llvm::SmallVector<uint32_t, 4> *indices);
/// Gets or creates an ImplicitParamDecl to represent the implicit object
/// parameter of the given method.
const ImplicitParamDecl *
getOrCreateDeclForMethodObject(const CXXMethodDecl *method);
/// \brief Loads numWords 32-bit unsigned integers or stores numWords 32-bit
/// unsigned integers (based on the doStore parameter) to the given
/// ByteAddressBuffer. Loading is allowed from a ByteAddressBuffer or
/// RWByteAddressBuffer. Storing is allowed only to RWByteAddressBuffer.
/// Panics if it is not the case.
SpirvInstruction *processByteAddressBufferLoadStore(const CXXMemberCallExpr *,
uint32_t numWords,
bool doStore);
/// \brief Processes the GetDimensions intrinsic function call on a
/// (RW)ByteAddressBuffer by querying the image in the given expr.
SpirvInstruction *processByteAddressBufferStructuredBufferGetDimensions(
const CXXMemberCallExpr *);
/// \brief Processes the Interlocked* intrinsic function call on a
/// RWByteAddressBuffer.
SpirvInstruction *
processRWByteAddressBufferAtomicMethods(hlsl::IntrinsicOp opcode,
const CXXMemberCallExpr *);
/// \brief Processes the GetSamplePosition intrinsic method call on a
/// Texture2DMS(Array).
SpirvInstruction *processGetSamplePosition(const CXXMemberCallExpr *);
/// \brief Processes the SubpassLoad intrinsic function call on a
/// SubpassInput(MS).
SpirvInstruction *processSubpassLoad(const CXXMemberCallExpr *);
/// \brief Generates SPIR-V instructions for the .Append()/.Consume() call on
/// the given {Append|Consume}StructuredBuffer. Returns the instruction for
/// the loaded value for .Consume(); returns nullptr for .Append().
SpirvInstruction *
processACSBufferAppendConsume(const CXXMemberCallExpr *expr);
/// \brief Generates SPIR-V instructions to emit the current vertex in GS.
SpirvInstruction *processStreamOutputAppend(const CXXMemberCallExpr *expr);
/// \brief Generates SPIR-V instructions to end emitting the current
/// primitive in GS.
SpirvInstruction *processStreamOutputRestart(const CXXMemberCallExpr *expr);
/// \brief Emulates GetSamplePosition() for standard sample settings, i.e.,
/// with 1, 2, 4, 8, or 16 samples. Returns float2(0) for other cases.
SpirvInstruction *emitGetSamplePosition(SpirvInstruction *sampleCount,
SpirvInstruction *sampleIndex,
SourceLocation loc,
SourceRange range = {});
/// \brief Returns OpAccessChain to the struct/class object that defines
/// memberFn when the struct/class is a base struct/class of objectType.
/// If the struct/class that defines memberFn is not a base of objectType,
/// returns nullptr.
SpirvInstruction *getBaseOfMemberFunction(QualType objectType,
SpirvInstruction *objInstr,
const CXXMethodDecl *memberFn,
SourceLocation loc);
/// \brief Takes a vector of size 4, and returns a vector of size 1 or 2 or 3
/// or 4. Creates a CompositeExtract or VectorShuffle instruction to extract
/// a scalar or smaller vector from the beginning of the input vector if
/// necessary. Assumes that 'fromInstr' represents a vector of size 4.
/// Panics if the target vector size is not 1, 2, 3, or 4.
SpirvInstruction *extractVecFromVec4(SpirvInstruction *fromInstr,
uint32_t targetVecSize,
QualType targetElemType,
SourceLocation loc,
SourceRange range = {});
/// \brief Creates SPIR-V instructions for sampling the given image.
/// It utilizes the SpirvBuilder's createImageSample and ensures that the
/// returned type is handled correctly.
/// HLSL image sampling methods may return a scalar, vec1, vec2, vec3, or
/// vec4. But non-Dref image sampling instructions in SPIR-V must always
/// return a vec4. As a result, an extra processing step is necessary.
SpirvInstruction *
createImageSample(QualType retType, QualType imageType,
SpirvInstruction *image, SpirvInstruction *sampler,
SpirvInstruction *coordinate, SpirvInstruction *compareVal,
SpirvInstruction *bias, SpirvInstruction *lod,
std::pair<SpirvInstruction *, SpirvInstruction *> grad,
SpirvInstruction *constOffset, SpirvInstruction *varOffset,
SpirvInstruction *constOffsets, SpirvInstruction *sample,
SpirvInstruction *minLod, SpirvInstruction *residencyCodeId,
SourceLocation loc, SourceRange range = {});
/// \brief Returns the OpVariables to be used as 'Interface' operands of
/// OpEntryPoint. entryPoint is the SpirvFunction for the OpEntryPoint.
std::vector<SpirvVariable *>
getInterfacesForEntryPoint(SpirvFunction *entryPoint);
/// \brief Emits OpBeginInvocationInterlockEXT and adds the appropriate
/// execution mode, if it has not already been added.
void beginInvocationInterlock(SourceLocation loc, SourceRange range);
/// \brief If the given FunctionDecl is not already in the workQueue, creates
/// a FunctionInfo object for it, and inserts it into the workQueue. It also
/// updates the functionInfoMap with the proper mapping.
void addFunctionToWorkQueue(hlsl::DXIL::ShaderKind,
const clang::FunctionDecl *,
bool isEntryFunction);
/// \brief Helper function to run SPIRV-Tools optimizer's performance passes.
/// Runs the SPIRV-Tools optimizer on the given SPIR-V module |mod|, and
/// gets the info/warning/error messages via |messages|.
/// Returns true on success and false otherwise.
bool spirvToolsOptimize(std::vector<uint32_t> *mod, std::string *messages);
/// \brief Runs the pass represented by the given pass token on the module.
/// Returns true if the pass was successfully run. Any messages from the
/// optimizer are returned in `messages`.
bool spirvToolsRunPass(std::vector<uint32_t> *mod,
spvtools::Optimizer::PassToken token,
std::string *messages);
/// \brief Calls the SPIRV-Tools optimizer's fix-opextinst-opcodes pass. This
/// pass fixes OpExtInst/OpExtInstWithForwardRefsKHR opcodes to use the
/// correct one depending on the presence of forward references.
bool spirvToolsFixupOpExtInst(std::vector<uint32_t> *mod,
std::string *messages);
/// \brief Runs the SPIRV-Tools optimizer, but only with the capability
/// trimming pass. Removes unused capabilities from the given SPIR-V module
/// |mod|, and returns info/warning/error messages via |messages|. This pass
/// doesn't trim all capabilities; to see the list of supported capabilities,
/// check the pass headers.
bool spirvToolsTrimCapabilities(std::vector<uint32_t> *mod,
std::string *messages);
/// \brief Runs the upgrade memory model pass using the SPIRV-Tools optimizer.
/// This pass will modify the module, |mod|, so that it conforms to the Vulkan
/// memory model instead of the GLSL450 memory model. Returns
/// info/warning/error messages via |messages|.
bool spirvToolsUpgradeToVulkanMemoryModel(std::vector<uint32_t> *mod,
std::string *messages);
/// \brief Helper function to run SPIRV-Tools optimizer's legalization passes.
/// Runs the SPIRV-Tools legalization on the given SPIR-V module |mod|, and
/// gets the info/warning/error messages via |messages|. If
/// |dsetbindingsToCombineImageSampler| is not empty, runs
/// --convert-to-sampled-image pass.
/// Returns true on success and false otherwise.
bool
spirvToolsLegalize(std::vector<uint32_t> *mod, std::string *messages,
const std::vector<spvtools::opt::DescriptorSetAndBinding>
*dsetbindingsToCombineImageSampler);
/// \brief Helper function to run the SPIRV-Tools validator.
/// Runs the SPIRV-Tools validator on the given SPIR-V module |mod|, and
/// gets the info/warning/error messages via |messages|.
/// Returns true on success and false otherwise.
bool spirvToolsValidate(std::vector<uint32_t> *mod, std::string *messages);
/// Adds the appropriate derivative group execution mode to the entry point.
/// The entry point must already have a LocalSize execution mode, which will
/// be used to determine which execution mode (quad or linear) is required.
/// This decision is made according to the rules in
/// https://microsoft.github.io/DirectX-Specs/d3d/HLSL_SM_6_6_Derivatives.html.
void addDerivativeGroupExecutionMode();
/// Creates an input variable for `param` that will be used by the patch
/// constant function. The parameter is also added to the patch constant
/// function. The wrapper function will copy the input variable to the
/// parameter.
SpirvVariable *
createPCFParmVarAndInitFromStageInputVar(const ParmVarDecl *param);
/// Returns a function scope parameter with the same type as |param|.
SpirvVariable *createFunctionScopeTempFromParameter(const ParmVarDecl *param);
/// Returns a vector of SpirvInstruction that is the decomposition of `inst`
/// into scalars. This is recursive. For example, a struct containing a
/// 4-element vector will return 4 scalars.
std::vector<SpirvInstruction *> decomposeToScalars(SpirvInstruction *inst);
/// Returns a spirv instruction with the value of the given type and layout
/// rule that is obtained by assigning each scalar in `type` the corresponding
/// value in `scalars`. This is the inverse of `decomposeToScalars`.
SpirvInstruction *
generateFromScalars(QualType type, std::vector<SpirvInstruction *> &scalars,
SpirvLayoutRule layoutRule);
/// Returns a spirv instruction with the value of the given type and layout
/// rule that is obtained by assigning `scalar` to each scalar in `type`. This
/// is the same as calling `generateFromScalars` with a sufficiently large
/// vector where every element is `scalar`.
SpirvInstruction *splatScalarToGenerate(QualType type,
SpirvInstruction *scalar,
SpirvLayoutRule rule);
/// Modifies the instructions in the code that use the GLSL450 memory model to
/// use the Vulkan memory model. This is done only if it has been requested or
/// if the Vulkan memory model capability has been added to the module.
bool UpgradeToVulkanMemoryModelIfNeeded(std::vector<uint32_t> *module);
public:
/// \brief Wrapper method to create a fatal error message and report it
/// in the diagnostic engine associated with this consumer.
template <unsigned N>
DiagnosticBuilder emitFatalError(const char (&message)[N],
SourceLocation loc) {
const auto diagId =
diags.getCustomDiagID(clang::DiagnosticsEngine::Fatal, message);
return diags.Report(loc, diagId);
}
/// \brief Wrapper method to create an error message and report it
/// in the diagnostic engine associated with this consumer.
template <unsigned N>
DiagnosticBuilder emitError(const char (&message)[N], SourceLocation loc) {
const auto diagId =
diags.getCustomDiagID(clang::DiagnosticsEngine::Error, message);
return diags.Report(loc, diagId);
}
/// \brief Wrapper method to create a warning message and report it
/// in the diagnostic engine associated with this consumer
template <unsigned N>
DiagnosticBuilder emitWarning(const char (&message)[N], SourceLocation loc) {
const auto diagId =
diags.getCustomDiagID(clang::DiagnosticsEngine::Warning, message);
return diags.Report(loc, diagId);
}
/// \brief Wrapper method to create a note message and report it
/// in the diagnostic engine associated with this consumer
template <unsigned N>
DiagnosticBuilder emitNote(const char (&message)[N], SourceLocation loc) {
const auto diagId =
diags.getCustomDiagID(clang::DiagnosticsEngine::Note, message);
return diags.Report(loc, diagId);
}
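// Illustrative usage of the wrappers above (a sketch, not taken from the
// surrounding code): the returned DiagnosticBuilder accepts streamed
// arguments for the %N placeholders in the message, e.g.
//   emitError("unknown intrinsic function '%0'", loc) << funcName;
//   emitWarning("implicit truncation of vector type", loc);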
private:
CompilerInstance &theCompilerInstance;
ASTContext &astContext;
DiagnosticsEngine &diags;
SpirvCodeGenOptions &spirvOptions;
/// \brief Entry function name, derived from the command line; it should be
/// treated as const.
const llvm::StringRef hlslEntryFunctionName;
/// \brief Structure to maintain record of all entry functions and any
/// reachable functions.
struct FunctionInfo {
public:
hlsl::ShaderModel::Kind shaderModelKind;
const DeclaratorDecl *funcDecl;
SpirvFunction *entryFunction;
bool isEntryFunction;
FunctionInfo() = default;
FunctionInfo(hlsl::ShaderModel::Kind smk, const DeclaratorDecl *fDecl,
SpirvFunction *entryFunc, bool isEntryFunc)
: shaderModelKind(smk), funcDecl(fDecl), entryFunction(entryFunc),
isEntryFunction(isEntryFunc) {}
};
SpirvContext spvContext;
FeatureManager featureManager;
SpirvBuilder spvBuilder;
DeclResultIdMapper declIdMapper;
ConstEvaluator constEvaluator;
/// \brief A map of funcDecl to its FunctionInfo. Consists of all entry
/// functions followed by all reachable functions from the entry functions.
llvm::DenseMap<const DeclaratorDecl *, FunctionInfo *> functionInfoMap;
/// A queue of FunctionInfo reachable from all the entry functions.
std::vector<const FunctionInfo *> workQueue;
/// Get SPIR-V entrypoint name for the given FunctionInfo.
llvm::StringRef getEntryPointName(const FunctionInfo *entryInfo);
/// The SpirvFunction for the entry function. Initially it is nullptr and will
/// be set when starting to translate the entry function.
SpirvFunction *entryFunction;
/// The current function under traversal.
const FunctionDecl *curFunction;
/// The SPIR-V function parameter for the current this object.
SpirvInstruction *curThis;
/// The source location of a push constant block we have previously seen.
/// Invalid means no push constant blocks defined thus far.
SourceLocation seenPushConstantAt;
/// Indicates whether the current emitter is in specialization constant mode:
/// all 32-bit scalar constants will be translated into OpSpecConstant.
bool isSpecConstantMode;
/// Whether the translated SPIR-V binary needs legalization.
///
/// The following cases will require legalization:
///
/// 1. Opaque types (textures, samplers) within structs
/// 2. Structured buffer aliasing
/// 3. Using SPIR-V instructions not allowed in the current shader stage
///
/// This covers the first and third case.
///
/// If this is true, SPIRV-Tools legalization passes will be executed after
/// the translation to legalize the generated SPIR-V binary.
///
/// Note: legalization specific code
bool needsLegalization;
/// Whether the translated SPIR-V binary should pass the
/// --before-hlsl-legalization option to spirv-val because of illegal function
/// parameter scope.
bool beforeHlslLegalization;
/// Mapping from methods to the decls to represent their implicit object
/// parameters
///
/// We need this map because we need to update the associated counters on
/// the implicit object when invoking method calls. The ImplicitParamDecl
/// mapped to serves as a key to find the associated counters in
/// DeclResultIdMapper.
///
/// Note: legalization specific code
llvm::DenseMap<const CXXMethodDecl *, const ImplicitParamDecl *> thisDecls;
/// Global variables that should be initialized once at the beginning of the
/// entry function.
llvm::SmallVector<const VarDecl *, 4> toInitGloalVars;
/// For loops, while loops, and switch statements may encounter "break"
/// statements that alter their control flow. At any point the break statement
/// is observed, the control flow jumps to the inner-most scope's merge block.
/// For instance: the break in the following example should cause a branch to
/// the SwitchMergeBB, not ForLoopMergeBB:
/// for (...) {
/// switch(...) {
/// case 1: break;
/// }
/// <--- SwitchMergeBB
/// }
/// <---- ForLoopMergeBB
/// This stack keeps track of the basic blocks to which branching could occur.
std::stack<SpirvBasicBlock *> breakStack;
/// Loops (do, while, for) may encounter "continue" statements that alter
/// their control flow. At any point the continue statement is observed, the
/// control flow jumps to the inner-most scope's continue block.
/// This stack keeps track of the basic blocks to which such branching could
/// occur.
std::stack<SpirvBasicBlock *> continueStack;
/// Maps a given statement to the basic block that is associated with it.
llvm::DenseMap<const Stmt *, SpirvBasicBlock *> stmtBasicBlock;
/// Maintains a mapping from a type to a SPIR-V variable, along with the
/// SPIR-V instruction for the id of its location decoration. Used for
/// raytracing stage variables of storage class RayPayloadNV, CallableDataNV,
/// and HitAttributeNV.
llvm::SmallDenseMap<QualType,
std::pair<SpirvInstruction *, SpirvInstruction *>, 4>
rayPayloadMap;
llvm::SmallDenseMap<QualType, SpirvInstruction *, 4> hitAttributeMap;
llvm::SmallDenseMap<QualType,
std::pair<SpirvInstruction *, SpirvInstruction *>, 4>
callDataMap;
/// Incoming ray payload for current entry function being translated.
/// Only valid for any-hit/closest-hit ray tracing shaders.
SpirvInstruction *currentRayPayload;
/// This is the Patch Constant Function. This function is not explicitly
/// called from the entry point function.
FunctionDecl *patchConstFunc;
/// The <result-id> of the OpString containing the main source file's path.
SpirvString *mainSourceFile;
/// ParentMap of the current function.
std::unique_ptr<ParentMap> parentMap = nullptr;
};
void SpirvEmitter::doDeclStmt(const DeclStmt *declStmt) {
for (auto *decl : declStmt->decls())
doDecl(decl);
}
} // end namespace spirv
} // end namespace clang
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/FeatureManager.cpp | //===---- FeatureManager.cpp - SPIR-V Version/Extension Manager -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//
#include "clang/SPIRV/FeatureManager.h"
#include <array>
#include <sstream>
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringSwitch.h"
namespace clang {
namespace spirv {
namespace {
constexpr std::array<std::pair<const char *, spv_target_env>, 6>
kKnownTargetEnv = {{{"vulkan1.0", SPV_ENV_VULKAN_1_0},
{"vulkan1.1", SPV_ENV_VULKAN_1_1},
{"vulkan1.1spirv1.4", SPV_ENV_VULKAN_1_1_SPIRV_1_4},
{"vulkan1.2", SPV_ENV_VULKAN_1_2},
{"vulkan1.3", SPV_ENV_VULKAN_1_3},
{"universal1.5", SPV_ENV_UNIVERSAL_1_5}}};
constexpr std::array<std::pair<spv_target_env, const char *>, 6>
kHumanReadableTargetEnv = {
{{SPV_ENV_VULKAN_1_0, "Vulkan 1.0"},
{SPV_ENV_VULKAN_1_1, "Vulkan 1.1"},
{SPV_ENV_VULKAN_1_1_SPIRV_1_4, "Vulkan 1.1 with SPIR-V 1.4"},
{SPV_ENV_VULKAN_1_2, "Vulkan 1.2"},
{SPV_ENV_VULKAN_1_3, "Vulkan 1.3"},
{SPV_ENV_UNIVERSAL_1_5, "SPIR-V 1.5"}}};
constexpr std::array<std::pair<spv_target_env, std::pair<uint32_t, uint32_t>>,
6>
kTargetEnvToSpirvVersion = {{{SPV_ENV_VULKAN_1_0, {1, 0}},
{SPV_ENV_VULKAN_1_1, {1, 3}},
{SPV_ENV_VULKAN_1_1_SPIRV_1_4, {1, 4}},
{SPV_ENV_VULKAN_1_2, {1, 5}},
{SPV_ENV_VULKAN_1_3, {1, 6}},
{SPV_ENV_UNIVERSAL_1_5, {1, 5}}}};
static_assert(
kKnownTargetEnv.size() == kHumanReadableTargetEnv.size(),
"kKnownTargetEnv and kHumanReadableTargetEnv should remain in sync.");
} // end namespace
llvm::Optional<spv_target_env>
FeatureManager::stringToSpvEnvironment(const std::string &target_env) {
auto it =
std::find_if(kKnownTargetEnv.cbegin(), kKnownTargetEnv.cend(),
[&](const auto &pair) { return pair.first == target_env; });
return it == kKnownTargetEnv.end()
? llvm::None
: llvm::Optional<spv_target_env>(it->second);
}
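// Illustrative behavior of stringToSpvEnvironment above (a sketch):
// stringToSpvEnvironment("vulkan1.2") yields SPV_ENV_VULKAN_1_2, while an
// unrecognized string such as "vulkan9.9" yields llvm::None.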
clang::VersionTuple FeatureManager::getSpirvVersion(spv_target_env env) {
auto it = std::find_if(kTargetEnvToSpirvVersion.cbegin(),
kTargetEnvToSpirvVersion.cend(),
[&](const auto &pair) { return pair.first == env; });
return it == kTargetEnvToSpirvVersion.end()
? clang::VersionTuple()
: clang::VersionTuple(it->second.first, it->second.second);
}
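// Illustrative behavior of getSpirvVersion above (a sketch): Vulkan 1.1 maps
// to SPIR-V 1.3, so getSpirvVersion(SPV_ENV_VULKAN_1_1) yields
// VersionTuple(1, 3); an unknown environment yields an empty VersionTuple.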
llvm::Optional<std::string>
FeatureManager::spvEnvironmentToPrettyName(spv_target_env target_env) {
auto it = std::find_if(
kHumanReadableTargetEnv.cbegin(), kHumanReadableTargetEnv.cend(),
[&](const auto &pair) { return pair.first == target_env; });
return it == kHumanReadableTargetEnv.end()
? llvm::None
: llvm::Optional<std::string>(it->second);
}
FeatureManager::FeatureManager(DiagnosticsEngine &de,
const SpirvCodeGenOptions &opts)
: diags(de) {
allowedExtensions.resize(static_cast<unsigned>(Extension::Unknown) + 1);
targetEnvStr = opts.targetEnv;
llvm::Optional<spv_target_env> targetEnvOpt =
stringToSpvEnvironment(opts.targetEnv);
if (!targetEnvOpt) {
emitError("unknown SPIR-V target environment '%0'", {}) << opts.targetEnv;
emitNote("allowed options are:\n vulkan1.0\n vulkan1.1\n "
"vulkan1.1spirv1.4\n vulkan1.2\n vulkan1.3\n universal1.5",
{});
return;
}
targetEnv = *targetEnvOpt;
if (opts.allowedExtensions.empty()) {
// If no explicit extension control from command line, use the default mode:
// allowing all extensions that are enabled by default.
allowAllKnownExtensions();
} else {
for (auto ext : opts.allowedExtensions)
allowExtension(ext);
// The option to use the vulkan memory model implies the extension is
// available.
if (opts.useVulkanMemoryModel) {
allowExtension("SPV_KHR_vulkan_memory_model");
}
}
}
bool FeatureManager::allowExtension(llvm::StringRef name) {
// Special case: If we are asked to allow "SPV_KHR" extension, it indicates
// that we should allow using *all* KHR extensions.
if (getExtensionSymbol(name) == Extension::KHR) {
bool result = true;
for (uint32_t i = 0; i < static_cast<uint32_t>(Extension::Unknown); ++i) {
llvm::StringRef extName(getExtensionName(static_cast<Extension>(i)));
if (isKHRExtension(extName))
result = result && allowExtension(extName);
}
return result;
}
const auto symbol = getExtensionSymbol(name);
if (symbol == Extension::Unknown) {
emitError("unknown SPIR-V extension '%0'", {}) << name;
emitNote("known extensions are\n%0", {})
<< getKnownExtensions("\n* ", "* ");
return false;
}
allowedExtensions.set(static_cast<unsigned>(symbol));
return true;
}
void FeatureManager::allowAllKnownExtensions() {
allowedExtensions.set();
const auto numExtensions = static_cast<uint32_t>(Extension::Unknown);
for (uint32_t ext = 0; ext < numExtensions; ++ext)
if (!enabledByDefault(static_cast<Extension>(ext)))
allowedExtensions.reset(ext);
}
bool FeatureManager::requestExtension(Extension ext, llvm::StringRef target,
SourceLocation srcLoc) {
if (allowedExtensions.test(static_cast<unsigned>(ext)))
return true;
emitError("SPIR-V extension '%0' required for %1 but not permitted to use",
srcLoc)
<< getExtensionName(ext) << target;
return false;
}
bool FeatureManager::requestTargetEnv(spv_target_env requestedEnv,
llvm::StringRef target,
SourceLocation srcLoc) {
if (targetEnv < requestedEnv) {
auto envName = spvEnvironmentToPrettyName(requestedEnv);
emitError("%0 is required for %1 but not permitted to use", srcLoc)
<< envName.getValueOr("unknown") << target;
emitNote("please specify your target environment via command line option "
"-fspv-target-env=",
{});
return false;
}
return true;
}
Extension FeatureManager::getExtensionSymbol(llvm::StringRef name) {
return llvm::StringSwitch<Extension>(name)
.Case("KHR", Extension::KHR)
.Case("SPV_KHR_16bit_storage", Extension::KHR_16bit_storage)
.Case("SPV_KHR_device_group", Extension::KHR_device_group)
.Case("SPV_KHR_multiview", Extension::KHR_multiview)
.Case("SPV_KHR_non_semantic_info", Extension::KHR_non_semantic_info)
.Case("SPV_KHR_shader_draw_parameters",
Extension::KHR_shader_draw_parameters)
.Case("SPV_KHR_ray_tracing", Extension::KHR_ray_tracing)
.Case("SPV_EXT_demote_to_helper_invocation",
Extension::EXT_demote_to_helper_invocation)
.Case("SPV_EXT_descriptor_indexing", Extension::EXT_descriptor_indexing)
.Case("SPV_EXT_fragment_fully_covered",
Extension::EXT_fragment_fully_covered)
.Case("SPV_EXT_fragment_invocation_density",
Extension::EXT_fragment_invocation_density)
.Case("SPV_EXT_fragment_shader_interlock",
Extension::EXT_fragment_shader_interlock)
.Case("SPV_EXT_mesh_shader", Extension::EXT_mesh_shader)
.Case("SPV_EXT_shader_stencil_export",
Extension::EXT_shader_stencil_export)
.Case("SPV_EXT_shader_viewport_index_layer",
Extension::EXT_shader_viewport_index_layer)
.Case("SPV_AMD_shader_early_and_late_fragment_tests",
Extension::AMD_shader_early_and_late_fragment_tests)
.Case("SPV_GOOGLE_hlsl_functionality1",
Extension::GOOGLE_hlsl_functionality1)
.Case("SPV_GOOGLE_user_type", Extension::GOOGLE_user_type)
.Case("SPV_KHR_post_depth_coverage", Extension::KHR_post_depth_coverage)
.Case("SPV_KHR_shader_clock", Extension::KHR_shader_clock)
.Case("SPV_NV_ray_tracing", Extension::NV_ray_tracing)
.Case("SPV_NV_mesh_shader", Extension::NV_mesh_shader)
.Case("SPV_KHR_ray_query", Extension::KHR_ray_query)
.Case("SPV_KHR_fragment_shading_rate",
Extension::KHR_fragment_shading_rate)
.Case("SPV_EXT_shader_image_int64", Extension::EXT_shader_image_int64)
.Case("SPV_KHR_physical_storage_buffer",
Extension::KHR_physical_storage_buffer)
.Case("SPV_KHR_vulkan_memory_model", Extension::KHR_vulkan_memory_model)
.Case("SPV_NV_compute_shader_derivatives",
Extension::NV_compute_shader_derivatives)
.Case("SPV_KHR_fragment_shader_barycentric",
Extension::KHR_fragment_shader_barycentric)
.Case("SPV_KHR_maximal_reconvergence",
Extension::KHR_maximal_reconvergence)
.Case("SPV_KHR_float_controls", Extension::KHR_float_controls)
.Case("SPV_NV_shader_subgroup_partitioned",
Extension::NV_shader_subgroup_partitioned)
.Default(Extension::Unknown);
}
const char *FeatureManager::getExtensionName(Extension symbol) {
switch (symbol) {
case Extension::KHR:
return "KHR";
case Extension::KHR_16bit_storage:
return "SPV_KHR_16bit_storage";
case Extension::KHR_device_group:
return "SPV_KHR_device_group";
case Extension::KHR_multiview:
return "SPV_KHR_multiview";
case Extension::KHR_non_semantic_info:
return "SPV_KHR_non_semantic_info";
case Extension::KHR_shader_draw_parameters:
return "SPV_KHR_shader_draw_parameters";
case Extension::KHR_post_depth_coverage:
return "SPV_KHR_post_depth_coverage";
case Extension::KHR_ray_tracing:
return "SPV_KHR_ray_tracing";
case Extension::KHR_shader_clock:
return "SPV_KHR_shader_clock";
case Extension::EXT_demote_to_helper_invocation:
return "SPV_EXT_demote_to_helper_invocation";
case Extension::EXT_descriptor_indexing:
return "SPV_EXT_descriptor_indexing";
case Extension::EXT_fragment_fully_covered:
return "SPV_EXT_fragment_fully_covered";
case Extension::EXT_fragment_invocation_density:
return "SPV_EXT_fragment_invocation_density";
case Extension::EXT_fragment_shader_interlock:
return "SPV_EXT_fragment_shader_interlock";
case Extension::EXT_mesh_shader:
return "SPV_EXT_mesh_shader";
case Extension::EXT_shader_stencil_export:
return "SPV_EXT_shader_stencil_export";
case Extension::EXT_shader_viewport_index_layer:
return "SPV_EXT_shader_viewport_index_layer";
case Extension::AMD_shader_early_and_late_fragment_tests:
return "SPV_AMD_shader_early_and_late_fragment_tests";
case Extension::GOOGLE_hlsl_functionality1:
return "SPV_GOOGLE_hlsl_functionality1";
case Extension::GOOGLE_user_type:
return "SPV_GOOGLE_user_type";
case Extension::NV_ray_tracing:
return "SPV_NV_ray_tracing";
case Extension::NV_mesh_shader:
return "SPV_NV_mesh_shader";
case Extension::KHR_ray_query:
return "SPV_KHR_ray_query";
case Extension::KHR_fragment_shading_rate:
return "SPV_KHR_fragment_shading_rate";
case Extension::EXT_shader_image_int64:
return "SPV_EXT_shader_image_int64";
case Extension::KHR_physical_storage_buffer:
return "SPV_KHR_physical_storage_buffer";
case Extension::KHR_vulkan_memory_model:
return "SPV_KHR_vulkan_memory_model";
case Extension::NV_compute_shader_derivatives:
return "SPV_NV_compute_shader_derivatives";
case Extension::KHR_fragment_shader_barycentric:
return "SPV_KHR_fragment_shader_barycentric";
case Extension::KHR_maximal_reconvergence:
return "SPV_KHR_maximal_reconvergence";
case Extension::KHR_float_controls:
return "SPV_KHR_float_controls";
case Extension::NV_shader_subgroup_partitioned:
return "SPV_NV_shader_subgroup_partitioned";
default:
break;
}
return "<unknown extension>";
}
bool FeatureManager::isKHRExtension(llvm::StringRef name) {
return name.startswith_lower("spv_khr_");
}
std::string FeatureManager::getKnownExtensions(const char *delimiter,
const char *prefix,
const char *postfix) {
std::ostringstream oss;
oss << prefix;
const auto numExtensions = static_cast<uint32_t>(Extension::Unknown);
for (uint32_t i = 0; i < numExtensions; ++i) {
oss << getExtensionName(static_cast<Extension>(i));
if (i + 1 < numExtensions)
oss << delimiter;
}
oss << postfix;
return oss.str();
}
bool FeatureManager::isExtensionRequiredForTargetEnv(Extension ext) {
bool required = true;
if (targetEnv >= SPV_ENV_VULKAN_1_3) {
// The following extensions are incorporated into Vulkan 1.3 or above, and
// are therefore not required to be emitted for that target environment.
switch (ext) {
case Extension::KHR_non_semantic_info:
required = false;
break;
default:
break;
}
}
if (required && targetEnv >= SPV_ENV_VULKAN_1_1) {
// The following extensions are incorporated into Vulkan 1.1 or above, and
// are therefore not required to be emitted for that target environment.
// TODO: Also add the following extensions if we start to support them.
// * SPV_KHR_storage_buffer_storage_class
// * SPV_KHR_variable_pointers
switch (ext) {
case Extension::KHR_16bit_storage:
case Extension::KHR_device_group:
case Extension::KHR_multiview:
case Extension::KHR_shader_draw_parameters:
required = false;
break;
default:
// Only 1.1 or above extensions can be suppressed.
break;
}
}
return required;
}
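// Illustrative behavior of isExtensionRequiredForTargetEnv above (a sketch):
// with targetEnv == SPV_ENV_VULKAN_1_1, Extension::KHR_multiview returns
// false because multiview is core in Vulkan 1.1 and need not be emitted;
// with SPV_ENV_VULKAN_1_0 the same query returns true.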
bool FeatureManager::isExtensionEnabled(Extension ext) {
bool allowed = false;
if (ext != Extension::Unknown &&
allowedExtensions.test(static_cast<unsigned>(ext)))
allowed = true;
return allowed;
}
bool FeatureManager::enabledByDefault(Extension ext) {
switch (ext) {
// KHR_ray_tracing and NV_ray_tracing are mutually exclusive, so enable only
// the KHR extension by default.
case Extension::NV_ray_tracing:
return false;
// Enabling EXT_demote_to_helper_invocation changes the code generation
// behavior for the 'discard' statement. Therefore we will only enable it if
// the user explicitly asks for it.
case Extension::EXT_demote_to_helper_invocation:
return false;
case Extension::EXT_mesh_shader:
// Enable EXT_mesh_shader only when the target environment is SPIR-V 1.4
// or above.
return isTargetEnvSpirv1p4OrAbove();
default:
return true;
}
}
bool FeatureManager::isTargetEnvVulkan1p1OrAbove() {
return targetEnv >= SPV_ENV_VULKAN_1_1;
}
bool FeatureManager::isTargetEnvSpirv1p4OrAbove() {
return targetEnv >= SPV_ENV_UNIVERSAL_1_4;
}
bool FeatureManager::isTargetEnvVulkan1p1Spirv1p4OrAbove() {
return targetEnv >= SPV_ENV_VULKAN_1_1_SPIRV_1_4;
}
bool FeatureManager::isTargetEnvVulkan1p2OrAbove() {
return targetEnv >= SPV_ENV_VULKAN_1_2;
}
bool FeatureManager::isTargetEnvVulkan1p3OrAbove() {
return targetEnv >= SPV_ENV_VULKAN_1_3;
}
} // end namespace spirv
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/LiteralTypeVisitor.cpp | //===--- LiteralTypeVisitor.cpp - Literal Type Visitor -----------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "LiteralTypeVisitor.h"
#include "LowerTypeVisitor.h"
#include "clang/SPIRV/AstTypeProbe.h"
#include "clang/SPIRV/SpirvFunction.h"
namespace clang {
namespace spirv {
bool LiteralTypeVisitor::visit(SpirvFunction *fn, Phase phase) {
assert(fn);
// Before going through the function instructions
if (phase == Visitor::Phase::Init) {
curFnAstReturnType = fn->getAstReturnType();
}
return true;
}
bool LiteralTypeVisitor::isLiteralLargerThan32Bits(
SpirvConstantInteger *constant) {
assert(constant->hasAstResultType());
QualType type = constant->getAstResultType();
const bool isSigned = type->isSignedIntegerType();
const llvm::APInt &value = constant->getValue();
return (isSigned && !value.isSignedIntN(32)) ||
(!isSigned && !value.isIntN(32));
}
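// Illustrative behavior of isLiteralLargerThan32Bits above (a sketch): for a
// signed literal, 2^31 - 1 still fits in 32 bits (returns false) while 2^31
// does not (returns true); for an unsigned literal the cutoff is 2^32 - 1.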
bool LiteralTypeVisitor::canDeduceTypeFromLitType(QualType litType,
QualType newType) {
if (litType == QualType() || newType == QualType() || litType == newType)
return false;
// The 'inout' and 'out' function arguments are of a reference type.
// For example: 'uint &'.
// We should first remove such reference from QualType (if any).
if (const auto *refType = litType->getAs<ReferenceType>())
litType = refType->getPointeeType();
if (const auto *refType = newType->getAs<ReferenceType>())
newType = refType->getPointeeType();
if (!isLitTypeOrVecOfLitType(litType))
return false;
if (isLitTypeOrVecOfLitType(newType))
return false;
if (litType->isFloatingType() && newType->isFloatingType())
return true;
if ((litType->isIntegerType() && !litType->isBooleanType()) &&
(newType->isIntegerType() && !newType->isBooleanType()))
return true;
{
QualType elemType1 = {};
uint32_t elemCount1 = 0;
QualType elemType2 = {};
uint32_t elemCount2 = 0;
if (isVectorType(litType, &elemType1, &elemCount1) &&
isVectorType(newType, &elemType2, &elemCount2))
return elemCount1 == elemCount2 &&
canDeduceTypeFromLitType(elemType1, elemType2);
}
return false;
}
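// Illustrative behavior of canDeduceTypeFromLitType above (a sketch):
// deducing 'uint' from a 'literal int' succeeds, as does a 3-element vector
// of 'uint' from a 3-element vector of 'literal int'; deduction fails when
// newType is itself a literal type, when the scalar kinds differ (e.g.
// 'literal int' vs. 'float'), or when booleans are involved.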
void LiteralTypeVisitor::tryToUpdateInstLitType(SpirvInstruction *inst,
QualType newType) {
if (!inst)
return;
// We may only update LitInt to Int type and LitFloat to Float type.
if (!canDeduceTypeFromLitType(inst->getAstResultType(), newType))
return;
// Since LiteralTypeVisitor is run before lowering the types, we can simply
// update the AST result-type of the instruction to the new type. In the case
// of the instruction being a constant instruction, since we do not have
// unique constants at this point, changing the QualType of the constant
// instruction is safe.
inst->setAstResultType(newType);
}
bool LiteralTypeVisitor::visitInstruction(SpirvInstruction *instr) {
// Instructions that don't have custom visitors cannot help with deducing the
// real type from the literal type.
return true;
}
bool LiteralTypeVisitor::visit(SpirvVariable *var) {
tryToUpdateInstLitType(var->getInitializer(), var->getAstResultType());
return true;
}
bool LiteralTypeVisitor::visit(SpirvAtomic *inst) {
const auto resultType = inst->getAstResultType();
tryToUpdateInstLitType(inst->getValue(), resultType);
tryToUpdateInstLitType(inst->getComparator(), resultType);
return true;
}
bool LiteralTypeVisitor::visit(SpirvUnaryOp *inst) {
const auto opcode = inst->getopcode();
const auto resultType = inst->getAstResultType();
auto *arg = inst->getOperand();
const auto argType = arg->getAstResultType();
if (!isLitTypeOrVecOfLitType(argType)) {
return true;
}
if (isLitTypeOrVecOfLitType(resultType)) {
return true;
}
switch (opcode) {
case spv::Op::OpUConvert:
case spv::Op::OpSConvert:
case spv::Op::OpFConvert:
// The result type gives us no information about the operand type. Do not do
// anything.
return true;
case spv::Op::OpConvertFToU:
case spv::Op::OpConvertFToS:
case spv::Op::OpConvertSToF:
case spv::Op::OpConvertUToF:
case spv::Op::OpNot:
case spv::Op::OpBitcast:
case spv::Op::OpSNegate: {
// These cases can change the type, but not the bitwidth. We can use the
// result type's bitwidth together with the operand's type.
const uint32_t resultTypeBitwidth = getElementSpirvBitwidth(
astContext, resultType, spvOptions.enable16BitTypes);
const QualType newType =
getTypeWithCustomBitwidth(astContext, argType, resultTypeBitwidth);
tryToUpdateInstLitType(arg, newType);
return true;
}
default:
// In all other cases, try to set the operand type to the result type.
tryToUpdateInstLitType(arg, resultType);
return true;
}
}
bool LiteralTypeVisitor::visit(SpirvBinaryOp *inst) {
const auto resultType = inst->getAstResultType();
const auto op = inst->getopcode();
auto *operand1 = inst->getOperand1();
auto *operand2 = inst->getOperand2();
switch (op) {
case spv::Op::OpShiftRightLogical:
case spv::Op::OpShiftRightArithmetic:
case spv::Op::OpShiftLeftLogical: {
// Base (arg1) should have the same type as result type
tryToUpdateInstLitType(inst->getOperand1(), resultType);
// The shift amount (arg2) cannot be a 64-bit type for a 32-bit base!
tryToUpdateInstLitType(inst->getOperand2(), resultType);
return true;
}
// The following operations have a boolean return type, so we cannot deduce
// anything about the operand type from the result type. However, the two
// operands in these operations must have the same bitwidth.
case spv::Op::OpIEqual:
case spv::Op::OpINotEqual:
case spv::Op::OpUGreaterThan:
case spv::Op::OpSGreaterThan:
case spv::Op::OpUGreaterThanEqual:
case spv::Op::OpSGreaterThanEqual:
case spv::Op::OpULessThan:
case spv::Op::OpSLessThan:
case spv::Op::OpULessThanEqual:
case spv::Op::OpSLessThanEqual:
case spv::Op::OpFOrdEqual:
case spv::Op::OpFUnordEqual:
case spv::Op::OpFOrdNotEqual:
case spv::Op::OpFUnordNotEqual:
case spv::Op::OpFOrdLessThan:
case spv::Op::OpFUnordLessThan:
case spv::Op::OpFOrdGreaterThan:
case spv::Op::OpFUnordGreaterThan:
case spv::Op::OpFOrdLessThanEqual:
case spv::Op::OpFUnordLessThanEqual:
case spv::Op::OpFOrdGreaterThanEqual:
case spv::Op::OpFUnordGreaterThanEqual: {
if (operand1->hasAstResultType() && operand2->hasAstResultType()) {
const auto operand1Type = operand1->getAstResultType();
const auto operand2Type = operand2->getAstResultType();
bool isLitOp1 = isLitTypeOrVecOfLitType(operand1Type);
bool isLitOp2 = isLitTypeOrVecOfLitType(operand2Type);
if (isLitOp1 && !isLitOp2) {
const uint32_t operand2Bitwidth = getElementSpirvBitwidth(
astContext, operand2Type, spvOptions.enable16BitTypes);
const QualType newType = getTypeWithCustomBitwidth(
astContext, operand1Type, operand2Bitwidth);
tryToUpdateInstLitType(operand1, newType);
return true;
}
if (isLitOp2 && !isLitOp1) {
const uint32_t operand1Bitwidth = getElementSpirvBitwidth(
astContext, operand1Type, spvOptions.enable16BitTypes);
const QualType newType = getTypeWithCustomBitwidth(
astContext, operand2Type, operand1Bitwidth);
tryToUpdateInstLitType(operand2, newType);
return true;
}
}
break;
}
// The result type of dot product is scalar but operands should be vector of
// the same type.
case spv::Op::OpDot: {
tryToUpdateInstLitType(inst->getOperand1(),
inst->getOperand2()->getAstResultType());
tryToUpdateInstLitType(inst->getOperand2(),
inst->getOperand1()->getAstResultType());
return true;
}
case spv::Op::OpVectorTimesScalar: {
QualType elemType;
if (isVectorType(operand1->getAstResultType(), &elemType) &&
elemType->isFloatingType()) {
tryToUpdateInstLitType(inst->getOperand2(), elemType);
}
return true;
}
default:
break;
}
// General attempt to deduce operand types from the result type.
tryToUpdateInstLitType(operand1, resultType);
tryToUpdateInstLitType(operand2, resultType);
return true;
}
bool LiteralTypeVisitor::visit(SpirvBitFieldInsert *inst) {
const auto resultType = inst->getAstResultType();
tryToUpdateInstLitType(inst->getBase(), resultType);
tryToUpdateInstLitType(inst->getInsert(), resultType);
return true;
}
bool LiteralTypeVisitor::visit(SpirvBitFieldExtract *inst) {
const auto resultType = inst->getAstResultType();
tryToUpdateInstLitType(inst->getBase(), resultType);
return true;
}
bool LiteralTypeVisitor::visit(SpirvSelect *inst) {
const auto resultType = inst->getAstResultType();
tryToUpdateInstLitType(inst->getTrueObject(), resultType);
tryToUpdateInstLitType(inst->getFalseObject(), resultType);
return true;
}
bool LiteralTypeVisitor::visit(SpirvVectorShuffle *inst) {
const auto resultType = inst->getAstResultType();
if (inst->hasAstResultType() && !isLitTypeOrVecOfLitType(resultType)) {
auto *vec1 = inst->getVec1();
auto *vec2 = inst->getVec2();
assert(vec1 && vec2);
QualType resultElemType = {};
uint32_t resultElemCount = 0;
QualType vec1ElemType = {};
uint32_t vec1ElemCount = 0;
QualType vec2ElemType = {};
uint32_t vec2ElemCount = 0;
(void)isVectorType(resultType, &resultElemType, &resultElemCount);
(void)isVectorType(vec1->getAstResultType(), &vec1ElemType, &vec1ElemCount);
(void)isVectorType(vec2->getAstResultType(), &vec2ElemType, &vec2ElemCount);
if (isLitTypeOrVecOfLitType(vec1ElemType)) {
tryToUpdateInstLitType(
vec1, astContext.getExtVectorType(resultElemType, vec1ElemCount));
}
if (isLitTypeOrVecOfLitType(vec2ElemType)) {
tryToUpdateInstLitType(
vec2, astContext.getExtVectorType(resultElemType, vec2ElemCount));
}
}
return true;
}
bool LiteralTypeVisitor::visit(SpirvGroupNonUniformOp *inst) {
for (auto *operand : inst->getOperands())
tryToUpdateInstLitType(operand, inst->getAstResultType());
return true;
}
bool LiteralTypeVisitor::visit(SpirvLoad *inst) {
auto *pointer = inst->getPointer();
if (!pointer->hasAstResultType())
return true;
QualType pointerType = pointer->getAstResultType();
if (!isLitTypeOrVecOfLitType(pointerType))
return true;
assert(inst->hasAstResultType());
QualType resultType = inst->getAstResultType();
if (!canDeduceTypeFromLitType(pointerType, resultType))
return true;
QualType newPointerType = astContext.getPointerType(resultType);
pointer->setAstResultType(newPointerType);
return true;
}
bool LiteralTypeVisitor::visit(SpirvStore *inst) {
auto *object = inst->getObject();
auto *pointer = inst->getPointer();
if (pointer->hasAstResultType()) {
QualType type = pointer->getAstResultType();
if (const auto *ptrType = type->getAs<PointerType>())
type = ptrType->getPointeeType();
tryToUpdateInstLitType(object, type);
} else if (pointer->hasResultType()) {
if (auto *ptrType = dyn_cast<HybridPointerType>(pointer->getResultType())) {
QualType type = ptrType->getPointeeType();
tryToUpdateInstLitType(object, type);
}
}
return true;
}
bool LiteralTypeVisitor::visit(SpirvConstantComposite *inst) {
const auto resultType = inst->getAstResultType();
llvm::SmallVector<SpirvInstruction *, 4> constituents(
inst->getConstituents().begin(), inst->getConstituents().end());
updateTypeForCompositeMembers(resultType, constituents);
return true;
}
bool LiteralTypeVisitor::visit(SpirvCompositeConstruct *inst) {
const auto resultType = inst->getAstResultType();
updateTypeForCompositeMembers(resultType, inst->getConstituents());
return true;
}
bool LiteralTypeVisitor::visit(SpirvCompositeExtract *inst) {
const auto resultType = inst->getAstResultType();
auto *base = inst->getComposite();
const auto baseType = base->getAstResultType();
if (isLitTypeOrVecOfLitType(baseType) &&
!isLitTypeOrVecOfLitType(resultType)) {
const uint32_t resultTypeBitwidth = getElementSpirvBitwidth(
astContext, resultType, spvOptions.enable16BitTypes);
const QualType newType =
getTypeWithCustomBitwidth(astContext, baseType, resultTypeBitwidth);
tryToUpdateInstLitType(base, newType);
}
return true;
}
bool LiteralTypeVisitor::updateTypeForCompositeMembers(
QualType compositeType, llvm::ArrayRef<SpirvInstruction *> constituents) {
if (compositeType == QualType())
return true;
// The constituents are the top level objects that create the result type.
// The result type may be one of the following:
// Vector, Array, Matrix, Struct
// TODO: This method is currently not recursive. We can use recursion if
// absolutely necessary.
{ // Vector case
QualType elemType = {};
if (isVectorType(compositeType, &elemType)) {
for (auto *constituent : constituents)
tryToUpdateInstLitType(constituent, elemType);
return true;
}
}
{ // Array case
if (const auto *arrType = dyn_cast<ConstantArrayType>(compositeType)) {
for (auto *constituent : constituents)
tryToUpdateInstLitType(constituent, arrType->getElementType());
return true;
}
}
{ // Matrix case
QualType elemType = {};
if (isMxNMatrix(compositeType, &elemType)) {
for (auto *constituent : constituents) {
// Each constituent is a matrix column (a vector)
uint32_t colSize = 0;
if (isVectorType(constituent->getAstResultType(), nullptr, &colSize)) {
QualType newType = astContext.getExtVectorType(elemType, colSize);
tryToUpdateInstLitType(constituent, newType);
}
}
return true;
}
}
{ // Struct case
if (const auto *structType = compositeType->getAs<RecordType>()) {
const auto *decl = structType->getDecl();
size_t i = 0;
for (const auto *field : decl->fields()) {
// If the field is a bitfield, it might be squashed later when building
// the SPIR-V type depending on context. This means indices starting
// from this bitfield are not guaranteed, and we shouldn't touch them.
if (field->isBitField())
break;
tryToUpdateInstLitType(constituents[i], field->getType());
++i;
}
return true;
}
}
return true;
}
bool LiteralTypeVisitor::visit(SpirvAccessChain *inst) {
for (auto *index : inst->getIndexes()) {
if (auto *constInt = dyn_cast<SpirvConstantInteger>(index)) {
if (!isLiteralLargerThan32Bits(constInt)) {
tryToUpdateInstLitType(
constInt, constInt->getAstResultType()->isSignedIntegerType()
? astContext.IntTy
: astContext.UnsignedIntTy);
}
} else {
tryToUpdateInstLitType(index,
index->getAstResultType()->isSignedIntegerType()
? astContext.IntTy
: astContext.UnsignedIntTy);
}
}
return true;
}
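// Illustrative behavior of the access-chain visit above (a sketch): a
// 'literal int' index such as 5 is concretized to a 32-bit int or uint,
// since only literals wider than 32 bits must keep a 64-bit type.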
bool LiteralTypeVisitor::visit(SpirvExtInst *inst) {
// Result type of the instruction can provide a hint about its operands. e.g.
// OpExtInst %float %glsl_set Pow %double_2 %double_12
// should be evaluated as:
// OpExtInst %float %glsl_set Pow %float_2 %float_12
const auto resultType = inst->getAstResultType();
for (auto *operand : inst->getOperands())
tryToUpdateInstLitType(operand, resultType);
return true;
}
bool LiteralTypeVisitor::visit(SpirvReturn *inst) {
if (inst->hasReturnValue()) {
tryToUpdateInstLitType(inst->getReturnValue(), curFnAstReturnType);
}
return true;
}
bool LiteralTypeVisitor::visit(SpirvCompositeInsert *inst) {
const auto resultType = inst->getAstResultType();
tryToUpdateInstLitType(inst->getComposite(), resultType);
tryToUpdateInstLitType(inst->getObject(),
getElementType(astContext, resultType));
return true;
}
bool LiteralTypeVisitor::visit(SpirvImageOp *inst) {
if (inst->isImageWrite() && inst->hasAstResultType()) {
const auto sampledType =
hlsl::GetHLSLResourceResultType(inst->getAstResultType());
tryToUpdateInstLitType(inst->getTexelToWrite(), sampledType);
}
return true;
}
bool LiteralTypeVisitor::visit(SpirvSwitch *inst) {
if (auto *constInt = dyn_cast<SpirvConstantInteger>(inst->getSelector())) {
if (isLiteralLargerThan32Bits(constInt)) {
const bool isSigned = constInt->getAstResultType()->isSignedIntegerType();
constInt->setAstResultType(isSigned ? astContext.LongLongTy
: astContext.UnsignedLongLongTy);
}
}
return true;
}
} // end namespace spirv
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/DebugTypeVisitor.h | //===--- DebugTypeVisitor.h - Convert AST Type to Debug Type -----*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_SPIRV_DEBUGTYPEVISITOR_H
#define LLVM_CLANG_LIB_SPIRV_DEBUGTYPEVISITOR_H
#include "clang/AST/ASTContext.h"
#include "clang/SPIRV/SpirvContext.h"
#include "clang/SPIRV/SpirvVisitor.h"
#include "llvm/ADT/Optional.h"
namespace clang {
namespace spirv {
class SpirvBuilder;
class LowerTypeVisitor;
/// The class responsible to translate SPIR-V types into DebugType*
/// types as defined in the rich DebugInfo spec.
/// This visitor must be run after the LowerTypeVisitor pass.
class DebugTypeVisitor : public Visitor {
public:
DebugTypeVisitor(ASTContext &astCtx, SpirvContext &spvCtx,
const SpirvCodeGenOptions &opts, SpirvBuilder &builder,
LowerTypeVisitor &lowerTypeVisitor)
: Visitor(opts, spvCtx), astContext(astCtx), spvContext(spvCtx),
spvBuilder(builder), spvTypeVisitor(lowerTypeVisitor),
currentDebugInstructionLayoutRule(SpirvLayoutRule::Void) {}
// Visiting different SPIR-V constructs.
bool visit(SpirvModule *module, Phase);
bool visit(SpirvBasicBlock *, Phase) { return true; }
bool visit(SpirvFunction *, Phase) { return true; }
/// The "sink" visit function for all instructions.
///
/// By default, all other visit functions redirect to this visit function.
/// So if you want to handle all instructions regardless of their
/// polymorphism, override this visit function.
bool visitInstruction(SpirvInstruction *);
using Visitor::visit;
private:
/// Emits error to the diagnostic engine associated with this visitor.
template <unsigned N>
DiagnosticBuilder emitError(const char (&message)[N],
SourceLocation srcLoc = {}) {
const auto diagId = astContext.getDiagnostics().getCustomDiagID(
clang::DiagnosticsEngine::Error, message);
return astContext.getDiagnostics().Report(srcLoc, diagId);
}
/// Lowers the given SPIR-V type to the corresponding SPIR-V debug type. Adds
/// the debug type instructions to the module.
///
/// The lowering is recursive. All the debug types that the target type
/// depends on will also be created.
SpirvDebugType *lowerToDebugType(const SpirvType *);
/// Lowers DebugTypeComposite.
SpirvDebugType *lowerToDebugTypeComposite(const SpirvType *);
/// Creates DebugTypeComposite for a struct type.
SpirvDebugTypeComposite *createDebugTypeComposite(const SpirvType *type,
const SourceLocation &loc,
uint32_t tag);
/// Adds DebugTypeMembers for member variables to DebugTypeComposite.
void addDebugTypeForMemberVariables(
SpirvDebugTypeComposite *debugTypeComposite, const StructType *type,
llvm::function_ref<SourceLocation()> location, unsigned numBases);
/// Lowers DebugTypeMembers of DebugTypeComposite.
void lowerDebugTypeMembers(SpirvDebugTypeComposite *debugTypeComposite,
const StructType *type, const DeclContext *decl);
/// Lowers DebugTypeTemplate for composite type.
SpirvDebugTypeTemplate *
lowerDebugTypeTemplate(const ClassTemplateSpecializationDecl *templateDecl,
SpirvDebugTypeComposite *debugTypeComposite);
/// Set the result type of debug instructions to OpTypeVoid.
/// According to the rich DebugInfo spec, all debug instructions are
/// OpExtInst with result type of void.
void setDefaultDebugInfo(SpirvDebugInstruction *instr);
SpirvDebugInfoNone *getDebugInfoNone();
private:
ASTContext &astContext;           ///< AST context
SpirvContext &spvContext;         ///< SPIR-V context
SpirvBuilder &spvBuilder;         ///< SPIR-V builder
LowerTypeVisitor &spvTypeVisitor; ///< QualType to SPIR-V type visitor
SpirvLayoutRule currentDebugInstructionLayoutRule; ///< SPIR-V layout rule
};
} // end namespace spirv
} // end namespace clang
#endif // LLVM_CLANG_LIB_SPIRV_DEBUGTYPEVISITOR_H
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/RawBufferMethods.cpp | //===---- RawBufferMethods.cpp ---- Raw Buffer Methods ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//
#include "RawBufferMethods.h"
#include "AlignmentSizeCalculator.h"
#include "LowerTypeVisitor.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Type.h"
#include "clang/SPIRV/AstTypeProbe.h"
#include "clang/SPIRV/SpirvBuilder.h"
#include "clang/SPIRV/SpirvInstruction.h"
#include <cstdint>
namespace {
/// Rounds the given value up to the given power of 2.
inline uint32_t roundToPow2(uint32_t val, uint32_t pow2) {
assert(pow2 != 0);
return (val + pow2 - 1) & ~(pow2 - 1);
}
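// Illustrative behavior of roundToPow2 above (a sketch): roundToPow2(34, 8)
// == 40 and roundToPow2(32, 8) == 32. The mask trick is only valid when
// 'pow2' is a power of two, as the name implies.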
} // anonymous namespace
namespace clang {
namespace spirv {
SpirvInstruction *RawBufferHandler::bitCastToNumericalOrBool(
SpirvInstruction *instr, QualType fromType, QualType toType,
SourceLocation loc, SourceRange range) {
if (isSameType(astContext, fromType, toType))
return instr;
if (toType->isBooleanType() || fromType->isBooleanType())
return theEmitter.castToType(instr, fromType, toType, loc, range);
// Perform a bitcast
return spvBuilder.createUnaryOp(spv::Op::OpBitcast, toType, instr, loc,
range);
}
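// Illustrative behavior of bitCastToNumericalOrBool above (a sketch):
// reinterpreting a loaded uint word as float emits OpBitcast, while any
// conversion involving bool routes through castToType, since booleans have
// no defined bit pattern to bitcast from or to.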
SpirvInstruction *RawBufferHandler::load16Bits(SpirvInstruction *buffer,
BufferAddress &address,
QualType target16BitType,
SourceRange range) {
const auto loc = buffer->getSourceLocation();
SpirvInstruction *result = nullptr;
auto *constUint0 =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 0));
auto *constUint3 =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 3));
auto *constUint4 =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 4));
auto *index = address.getWordIndex(loc, range);
// Take the byte address modulo 4 to get the byte offset within the word,
// then multiply by 8 to get the bit offset within the word.
auto *bitOffset = spvBuilder.createBinaryOp(
spv::Op::OpUMod, astContext.UnsignedIntTy, address.getByteAddress(),
constUint4, loc, range);
bitOffset = spvBuilder.createBinaryOp(spv::Op::OpShiftLeftLogical,
astContext.UnsignedIntTy, bitOffset,
constUint3, loc, range);
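// Worked example (illustrative): for byte address 6, the word index is 1 and
// 6 % 4 = 2, so the bit offset is 2 * 8 = 16; the shift below then moves the
// upper half of the loaded word into the low 16 bits.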
// The underlying element type of the ByteAddressBuffer is uint. So we
// need to load 32-bits at the very least.
auto *ptr = spvBuilder.createAccessChain(astContext.UnsignedIntTy, buffer,
{constUint0, index}, loc, range);
result = spvBuilder.createLoad(astContext.UnsignedIntTy, ptr, loc, range);
result = spvBuilder.createBinaryOp(spv::Op::OpShiftRightLogical,
astContext.UnsignedIntTy, result,
bitOffset, loc, range);
result = spvBuilder.createUnaryOp(
spv::Op::OpUConvert, astContext.UnsignedShortTy, result, loc, range);
result = bitCastToNumericalOrBool(result, astContext.UnsignedShortTy,
target16BitType, loc, range);
result->setRValue();
address.incrementByteAddress(2, loc, range);
return result;
}
SpirvInstruction *RawBufferHandler::load32Bits(SpirvInstruction *buffer,
BufferAddress &address,
QualType target32BitType,
SourceRange range) {
const auto loc = buffer->getSourceLocation();
SpirvInstruction *result = nullptr;
// Only need to perform one 32-bit uint load.
auto *constUint0 =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 0));
auto *index = address.getWordIndex(loc, range);
auto *loadPtr = spvBuilder.createAccessChain(astContext.UnsignedIntTy, buffer,
{constUint0, index}, loc, range);
result = spvBuilder.createLoad(astContext.UnsignedIntTy, loadPtr, loc, range);
result = bitCastToNumericalOrBool(result, astContext.UnsignedIntTy,
target32BitType, loc, range);
result->setRValue();
address.incrementWordIndex(loc, range);
return result;
}
SpirvInstruction *RawBufferHandler::load64Bits(SpirvInstruction *buffer,
BufferAddress &address,
QualType target64BitType,
SourceRange range) {
const auto loc = buffer->getSourceLocation();
SpirvInstruction *result = nullptr;
SpirvInstruction *ptr = nullptr;
auto *constUint0 =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 0));
auto *constUint32 =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 32));
auto *index = address.getWordIndex(loc, range);
// Need to perform two 32-bit uint loads and construct a 64-bit value.
// Load the first 32-bit uint (word0).
ptr = spvBuilder.createAccessChain(astContext.UnsignedIntTy, buffer,
{constUint0, index}, loc, range);
SpirvInstruction *word0 =
spvBuilder.createLoad(astContext.UnsignedIntTy, ptr, loc, range);
// Increment the base index
address.incrementWordIndex(loc, range);
index = address.getWordIndex(loc, range);
// Load the second 32-bit uint (word1).
ptr = spvBuilder.createAccessChain(astContext.UnsignedIntTy, buffer,
{constUint0, index}, loc, range);
SpirvInstruction *word1 =
spvBuilder.createLoad(astContext.UnsignedIntTy, ptr, loc, range);
// Convert both word0 and word1 to 64-bit uints.
word0 = spvBuilder.createUnaryOp(
spv::Op::OpUConvert, astContext.UnsignedLongLongTy, word0, loc, range);
word1 = spvBuilder.createUnaryOp(
spv::Op::OpUConvert, astContext.UnsignedLongLongTy, word1, loc, range);
// Shift word1 to the left by 32 bits.
word1 = spvBuilder.createBinaryOp(spv::Op::OpShiftLeftLogical,
astContext.UnsignedLongLongTy, word1,
constUint32, loc, range);
// BitwiseOr word0 and word1.
result = spvBuilder.createBinaryOp(spv::Op::OpBitwiseOr,
astContext.UnsignedLongLongTy, word0,
word1, loc, range);
result = bitCastToNumericalOrBool(result, astContext.UnsignedLongLongTy,
target64BitType, loc, range);
result->setRValue();
address.incrementWordIndex(loc, range);
return result;
}
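// Worked example (illustrative): if word0 == 0xDDCCBBAA and word1 ==
// 0x11223344, the result is 0x11223344DDCCBBAA; word1 supplies the high 32
// bits and word0 the low 32 bits, matching the buffer's word order.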
SpirvInstruction *RawBufferHandler::processTemplatedLoadFromBuffer(
SpirvInstruction *buffer, BufferAddress &address, const QualType targetType,
SourceRange range) {
const auto loc = buffer->getSourceLocation();
SpirvInstruction *result = nullptr;
// Scalar types
if (isScalarType(targetType)) {
SpirvInstruction *scalarResult = nullptr;
auto loadWidth = getElementSpirvBitwidth(
astContext, targetType, theEmitter.getSpirvOptions().enable16BitTypes);
switch (loadWidth) {
case 16:
scalarResult = load16Bits(buffer, address, targetType, range);
break;
case 32:
scalarResult = load32Bits(buffer, address, targetType, range);
break;
case 64:
scalarResult = load64Bits(buffer, address, targetType, range);
break;
default:
theEmitter.emitError(
"templated load of ByteAddressBuffer is only implemented for "
"16, 32, and 64-bit types",
loc);
return nullptr;
}
assert(scalarResult != nullptr);
// We set the layout rule for scalars. Other types are built up from the
// scalars, and should inherit this layout rule or default to Void.
scalarResult->setLayoutRule(SpirvLayoutRule::Void);
return scalarResult;
}
// Vector types
{
QualType elemType = {};
uint32_t elemCount = 0;
if (isVectorType(targetType, &elemType, &elemCount)) {
llvm::SmallVector<SpirvInstruction *, 4> loadedElems;
for (uint32_t i = 0; i < elemCount; ++i) {
loadedElems.push_back(
processTemplatedLoadFromBuffer(buffer, address, elemType, range));
}
result = spvBuilder.createCompositeConstruct(targetType, loadedElems, loc,
range);
result->setRValue();
return result;
}
}
// Array types
{
QualType elemType = {};
uint32_t elemCount = 0;
if (const auto *arrType = astContext.getAsConstantArrayType(targetType)) {
elemCount = static_cast<uint32_t>(arrType->getSize().getZExtValue());
elemType = arrType->getElementType();
llvm::SmallVector<SpirvInstruction *, 4> loadedElems;
for (uint32_t i = 0; i < elemCount; ++i) {
loadedElems.push_back(
processTemplatedLoadFromBuffer(buffer, address, elemType, range));
}
result = spvBuilder.createCompositeConstruct(targetType, loadedElems, loc,
range);
result->setRValue();
return result;
}
}
// Matrix types
{
QualType elemType = {};
uint32_t numRows = 0;
uint32_t numCols = 0;
if (isMxNMatrix(targetType, &elemType, &numRows, &numCols)) {
// In DX, the default matrix orientation in ByteAddressBuffer is column
// major. If HLSL/DXIL support the `column_major` and `row_major`
// attributes in the future, we will have to check for them here and
// override the behavior.
//
// The assumed buffer matrix order is controlled by the
// `-fspv-use-legacy-buffer-matrix-order` flag:
// (a) false --> assume the matrix is stored column major
// (b) true --> assume the matrix is stored row major
//
// We provide (b) for compatibility with legacy shaders that depend on
// the previous, incorrect, raw buffer matrix order assumed by the SPIR-V
// codegen.
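//
// For example, for a 2x3 matrix, row 0 gathers loadedElems[0], [2], and [4]
// when the buffer is column major, versus loadedElems[0], [1], and [2] with
// the legacy row-major order.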
const bool isBufferColumnMajor =
!theEmitter.getSpirvOptions().useLegacyBufferMatrixOrder;
const uint32_t numElements = numRows * numCols;
llvm::SmallVector<SpirvInstruction *, 16> loadedElems(numElements);
for (uint32_t i = 0; i != numElements; ++i)
loadedElems[i] =
processTemplatedLoadFromBuffer(buffer, address, elemType, range);
llvm::SmallVector<SpirvInstruction *, 4> loadedRows;
for (uint32_t i = 0; i < numRows; ++i) {
llvm::SmallVector<SpirvInstruction *, 4> loadedColumn;
for (uint32_t j = 0; j < numCols; ++j) {
const uint32_t elementIndex =
isBufferColumnMajor ? (j * numRows + i) : (i * numCols + j);
loadedColumn.push_back(loadedElems[elementIndex]);
}
const auto rowType = astContext.getExtVectorType(elemType, numCols);
loadedRows.push_back(spvBuilder.createCompositeConstruct(
rowType, loadedColumn, loc, range));
}
result = spvBuilder.createCompositeConstruct(targetType, loadedRows, loc,
range);
result->setRValue();
return result;
}
}
// Struct types
// The "natural" layout for structure types dictates that structs are
// aligned like their field with the largest alignment.
// As a result, there might exist some padding after some struct members.
if (const auto *structType = targetType->getAs<RecordType>()) {
LowerTypeVisitor lowerTypeVisitor(astContext, theEmitter.getSpirvContext(),
theEmitter.getSpirvOptions(), spvBuilder);
auto *decl = targetType->getAsTagDecl();
assert(decl && "Expected all structs to be tag decls.");
const StructType *spvType = dyn_cast<StructType>(lowerTypeVisitor.lowerType(
targetType, theEmitter.getSpirvOptions().sBufferLayoutRule, llvm::None,
decl->getLocation()));
assert(spvType);
llvm::SmallVector<SpirvInstruction *, 4> loadedElems;
forEachSpirvField(
structType, spvType,
[this, &buffer, &address, range,
&loadedElems](size_t spirvFieldIndex, const QualType &fieldType,
const auto &field) {
auto *baseOffset = address.getByteAddress();
if (field.offset.hasValue() && field.offset.getValue() != 0) {
const auto loc = buffer->getSourceLocation();
SpirvConstant *offset = spvBuilder.getConstantInt(
astContext.UnsignedIntTy,
llvm::APInt(32, field.offset.getValue()));
baseOffset = spvBuilder.createBinaryOp(
spv::Op::OpIAdd, astContext.UnsignedIntTy, baseOffset, offset,
loc, range);
}
loadedElems.push_back(processTemplatedLoadFromBuffer(
buffer, baseOffset, fieldType, range));
return true;
});
// After we're done with loading the entire struct, we need to update the
// byteAddress (in case we are loading an array of structs).
//
// Example: struct alignment = 8. struct size = 34 bytes
// (34 / 8) = 4 full words
// (34 % 8) = 2 > 0, therefore need to move to the next aligned address
// So the starting byte offset after loading the entire struct is:
// 8 * (4 + 1) = 40
uint32_t structAlignment = 0, structSize = 0, stride = 0;
std::tie(structAlignment, structSize) =
AlignmentSizeCalculator(astContext, theEmitter.getSpirvOptions())
.getAlignmentAndSize(targetType,
theEmitter.getSpirvOptions().sBufferLayoutRule,
llvm::None, &stride);
assert(structAlignment != 0);
SpirvInstruction *structWidth = spvBuilder.getConstantInt(
astContext.UnsignedIntTy,
llvm::APInt(32, roundToPow2(structSize, structAlignment)));
address.incrementByteAddress(structWidth, loc, range);
result = spvBuilder.createCompositeConstruct(targetType, loadedElems, loc,
range);
result->setRValue();
return result;
}
llvm_unreachable("templated buffer load unimplemented for type");
}
SpirvInstruction *RawBufferHandler::processTemplatedLoadFromBuffer(
SpirvInstruction *buffer, SpirvInstruction *byteAddress,
const QualType targetType, SourceRange range) {
BufferAddress address(byteAddress, theEmitter);
return processTemplatedLoadFromBuffer(buffer, address, targetType, range);
}
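// Stores a 16-bit value via a (non-atomic) read-modify-write of the
// containing 32-bit word: the value is zero-extended, shifted to its halfword
// position, and OR'd into the masked original word. For example, a store at
// byte address 6 targets word index 1 with bitOffset = (6 % 4) * 8 = 16 and
// mask = 0xffff << (16 - 16) = 0x0000ffff, preserving the low halfword while
// the new value lands in the high halfword.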
void RawBufferHandler::store16Bits(SpirvInstruction *value,
SpirvInstruction *buffer,
BufferAddress &address,
const QualType valueType,
SourceRange range) {
const auto loc = buffer->getSourceLocation();
SpirvInstruction *result = nullptr;
auto *constUint0 =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 0));
auto *constUint3 =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 3));
auto *constUint4 =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 4));
auto *constUint16 =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 16));
auto *constUintFFFF = spvBuilder.getConstantInt(astContext.UnsignedIntTy,
llvm::APInt(32, 0xffff));
auto *index = address.getWordIndex(loc, range);
// Take the remainder and multiply by 8 to get the bit offset within the word.
auto *bitOffset = spvBuilder.createBinaryOp(
spv::Op::OpUMod, astContext.UnsignedIntTy, address.getByteAddress(),
constUint4, loc, range);
bitOffset = spvBuilder.createBinaryOp(spv::Op::OpShiftLeftLogical,
astContext.UnsignedIntTy, bitOffset,
constUint3, loc, range);
// The underlying element type of the ByteAddressBuffer is uint. So we
// need to store a 32-bit value.
auto *ptr = spvBuilder.createAccessChain(astContext.UnsignedIntTy, buffer,
{constUint0, index}, loc, range);
result = bitCastToNumericalOrBool(value, valueType,
astContext.UnsignedShortTy, loc, range);
result = spvBuilder.createUnaryOp(
spv::Op::OpUConvert, astContext.UnsignedIntTy, result, loc, range);
result = spvBuilder.createBinaryOp(spv::Op::OpShiftLeftLogical,
astContext.UnsignedIntTy, result,
bitOffset, loc, range);
auto *maskOffset =
spvBuilder.createBinaryOp(spv::Op::OpISub, astContext.UnsignedIntTy,
constUint16, bitOffset, loc, range);
auto *mask = spvBuilder.createBinaryOp(spv::Op::OpShiftLeftLogical,
astContext.UnsignedIntTy,
constUintFFFF, maskOffset, loc, range);
// Load and mask the other value in the word.
auto *masked = spvBuilder.createBinaryOp(
spv::Op::OpBitwiseAnd, astContext.UnsignedIntTy,
spvBuilder.createLoad(astContext.UnsignedIntTy, ptr, loc), mask, loc,
range);
result =
spvBuilder.createBinaryOp(spv::Op::OpBitwiseOr, astContext.UnsignedIntTy,
masked, result, loc, range);
spvBuilder.createStore(ptr, result, loc, range);
address.incrementByteAddress(2, loc, range);
}
void RawBufferHandler::store32Bits(SpirvInstruction *value,
SpirvInstruction *buffer,
BufferAddress &address,
const QualType valueType,
SourceRange range) {
const auto loc = buffer->getSourceLocation();
auto *constUint0 =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 0));
auto *index = address.getWordIndex(loc, range);
// The underlying element type of the ByteAddressBuffer is uint. So we
// need to store a 32-bit value.
auto *ptr = spvBuilder.createAccessChain(astContext.UnsignedIntTy, buffer,
{constUint0, index}, loc, range);
value = bitCastToNumericalOrBool(value, valueType, astContext.UnsignedIntTy,
loc, range);
spvBuilder.createStore(ptr, value, loc, range);
address.incrementWordIndex(loc, range);
}
void RawBufferHandler::store64Bits(SpirvInstruction *value,
SpirvInstruction *buffer,
BufferAddress &address,
const QualType valueType,
SourceRange range) {
const auto loc = buffer->getSourceLocation();
auto *constUint0 =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 0));
auto *constUint32 =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 32));
auto *index = address.getWordIndex(loc, range);
// The underlying element type of the ByteAddressBuffer is uint. So we
// need to store two 32-bit values.
auto *ptr = spvBuilder.createAccessChain(astContext.UnsignedIntTy, buffer,
{constUint0, index}, loc, range);
// First convert the 64-bit value to uint64_t. Then extract two 32-bit words
// from it.
value = bitCastToNumericalOrBool(value, valueType,
astContext.UnsignedLongLongTy, loc, range);
// Use OpUConvert to perform truncation (produces the least significant bits).
SpirvInstruction *lsb = spvBuilder.createUnaryOp(
spv::Op::OpUConvert, astContext.UnsignedIntTy, value, loc, range);
// Shift uint64_t to the right by 32 bits and truncate to get the most
// significant bits.
SpirvInstruction *msb = spvBuilder.createUnaryOp(
spv::Op::OpUConvert, astContext.UnsignedIntTy,
spvBuilder.createBinaryOp(spv::Op::OpShiftRightLogical,
astContext.UnsignedLongLongTy, value,
constUint32, loc, range),
loc, range);
spvBuilder.createStore(ptr, lsb, loc, range);
address.incrementWordIndex(loc, range);
index = address.getWordIndex(loc, range);
ptr = spvBuilder.createAccessChain(astContext.UnsignedIntTy, buffer,
{constUint0, index}, loc, range);
spvBuilder.createStore(ptr, msb, loc, range);
address.incrementWordIndex(loc, range);
}
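// Flattens a composite value into a flat sequence of scalars (or structs) by
// repeatedly extracting elements in place. For example, a float2x2 becomes
// its four scalar elements (the extraction order depends on the buffer matrix
// order flag), and an int3[2] array becomes six int scalars. Returns the type
// of the values left in the deque.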
QualType RawBufferHandler::serializeToScalarsOrStruct(
std::deque<SpirvInstruction *> *values, QualType valueType,
SourceLocation loc, SourceRange range) {
uint32_t size = values->size();
// Vector type
{
QualType elemType = {};
uint32_t elemCount = 0;
if (isVectorType(valueType, &elemType, &elemCount)) {
for (uint32_t i = 0; i < size; ++i) {
for (uint32_t j = 0; j < elemCount; ++j) {
values->push_back(spvBuilder.createCompositeExtract(
elemType, values->front(), {j}, loc, range));
}
values->pop_front();
}
return elemType;
}
}
// Matrix type
{
QualType elemType = {};
uint32_t numRows = 0, numCols = 0;
if (isMxNMatrix(valueType, &elemType, &numRows, &numCols)) {
// Check if the destination buffer expects matrices in column major or row
// major order. In the future, we may also need to consider the
// `row_major` and `column_major` attributes. This is not handled by
// HLSL/DXIL at the moment, so we ignore them too.
const bool isBufferColumnMajor =
!theEmitter.getSpirvOptions().useLegacyBufferMatrixOrder;
for (uint32_t i = 0; i < size; ++i) {
if (isBufferColumnMajor) {
// Access the matrix in the column major order.
for (uint32_t j = 0; j != numCols; ++j) {
for (uint32_t k = 0; k != numRows; ++k) {
values->push_back(spvBuilder.createCompositeExtract(
elemType, values->front(), {k, j}, loc, range));
}
}
} else {
// Access the matrix in the row major order.
for (uint32_t j = 0; j != numRows; ++j) {
for (uint32_t k = 0; k != numCols; ++k) {
values->push_back(spvBuilder.createCompositeExtract(
elemType, values->front(), {j, k}, loc, range));
}
}
}
values->pop_front();
}
return serializeToScalarsOrStruct(values, elemType, loc, range);
}
}
// Array type
{
if (const auto *arrType = astContext.getAsConstantArrayType(valueType)) {
const uint32_t arrElemCount =
static_cast<uint32_t>(arrType->getSize().getZExtValue());
const QualType arrElemType = arrType->getElementType();
for (uint32_t i = 0; i < size; ++i) {
for (uint32_t j = 0; j < arrElemCount; ++j) {
values->push_back(spvBuilder.createCompositeExtract(
arrElemType, values->front(), {j}, loc, range));
}
values->pop_front();
}
return serializeToScalarsOrStruct(values, arrElemType, loc, range);
}
}
if (isScalarType(valueType))
return valueType;
if (valueType->getAs<RecordType>())
return valueType;
llvm_unreachable("unhandled type when serializing an array");
}
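// Stores a value of a templated type by mirroring the load path: scalars go
// directly to store16/32/64Bits; vectors, matrices, and constant arrays are
// first flattened by serializeToScalarsOrStruct and stored element by
// element; struct fields are stored at their layout offsets. For example,
// `buf.Store<float3>(addr, v)` in effect becomes three 32-bit word stores at
// consecutive word indices.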
void RawBufferHandler::processTemplatedStoreToBuffer(SpirvInstruction *value,
SpirvInstruction *buffer,
BufferAddress &address,
const QualType valueType,
SourceRange range) {
const auto loc = buffer->getSourceLocation();
// Scalar types
if (isScalarType(valueType)) {
auto storeWidth = getElementSpirvBitwidth(
astContext, valueType, theEmitter.getSpirvOptions().enable16BitTypes);
switch (storeWidth) {
case 16:
store16Bits(value, buffer, address, valueType, range);
return;
case 32:
store32Bits(value, buffer, address, valueType, range);
return;
case 64:
store64Bits(value, buffer, address, valueType, range);
return;
default:
theEmitter.emitError(
"templated store of ByteAddressBuffer is only implemented for "
"16, 32, and 64-bit types",
loc);
return;
}
}
// Vectors, Matrices, and Arrays can all be serialized and stored.
if (isVectorType(valueType) || isMxNMatrix(valueType) ||
isConstantArrayType(astContext, valueType)) {
std::deque<SpirvInstruction *> elems;
elems.push_back(value);
auto serializedType =
serializeToScalarsOrStruct(&elems, valueType, loc, range);
if (isScalarType(serializedType) || serializedType->getAs<RecordType>()) {
for (auto elem : elems)
processTemplatedStoreToBuffer(elem, buffer, address, serializedType,
range);
}
return;
}
// Struct types
// The "natural" layout for structure types dictates that structs are
// aligned like their field with the largest alignment.
// As a result, there might exist some padding after some struct members.
if (const auto *structType = valueType->getAs<RecordType>()) {
LowerTypeVisitor lowerTypeVisitor(astContext, theEmitter.getSpirvContext(),
theEmitter.getSpirvOptions(), spvBuilder);
auto *decl = valueType->getAsTagDecl();
assert(decl && "Expected all structs to be tag decls.");
const StructType *spvType = dyn_cast<StructType>(lowerTypeVisitor.lowerType(
valueType, theEmitter.getSpirvOptions().sBufferLayoutRule, llvm::None,
decl->getLocation()));
assert(spvType);
forEachSpirvField(
structType, spvType,
[this, &address, loc, range, buffer, value](size_t spirvFieldIndex,
const QualType &fieldType,
const auto &field) {
auto *baseOffset = address.getByteAddress();
if (field.offset.hasValue() && field.offset.getValue() != 0) {
SpirvConstant *offset = spvBuilder.getConstantInt(
astContext.UnsignedIntTy,
llvm::APInt(32, field.offset.getValue()));
baseOffset = spvBuilder.createBinaryOp(
spv::Op::OpIAdd, astContext.UnsignedIntTy, baseOffset, offset,
loc, range);
}
processTemplatedStoreToBuffer(
spvBuilder.createCompositeExtract(
fieldType, value, {static_cast<uint32_t>(spirvFieldIndex)},
loc, range),
buffer, baseOffset, fieldType, range);
return true;
});
// After we're done with storing the entire struct, we need to update the
// byteAddress (in case we are storing an array of structs).
//
// Example: struct alignment = 8. struct size = 34 bytes
// (34 / 8) = 4 full words
// (34 % 8) = 2 > 0, therefore need to move to the next aligned address
// So the starting byte offset after loading the entire struct is:
// 8 * (4 + 1) = 40
uint32_t structAlignment = 0, structSize = 0, stride = 0;
std::tie(structAlignment, structSize) =
AlignmentSizeCalculator(astContext, theEmitter.getSpirvOptions())
.getAlignmentAndSize(valueType,
theEmitter.getSpirvOptions().sBufferLayoutRule,
llvm::None, &stride);
assert(structAlignment != 0);
auto *structWidth = spvBuilder.getConstantInt(
astContext.UnsignedIntTy,
llvm::APInt(32, roundToPow2(structSize, structAlignment)));
address.incrementByteAddress(structWidth, loc, range);
return;
}
llvm_unreachable("templated buffer store unimplemented for type");
}
void RawBufferHandler::processTemplatedStoreToBuffer(
SpirvInstruction *value, SpirvInstruction *buffer,
SpirvInstruction *&byteAddress, const QualType valueType,
SourceRange range) {
BufferAddress address(byteAddress, theEmitter);
processTemplatedStoreToBuffer(value, buffer, address, valueType, range);
}
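// BufferAddress tracks the current buffer position in two forms: the byte
// address, and a lazily computed word index (byteAddress / 4) used to index
// the underlying uint array. Caching the word index lets consecutive
// word-sized accesses avoid re-deriving it from the byte address.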
SpirvInstruction *RawBufferHandler::BufferAddress::getByteAddress() {
return byteAddress;
}
SpirvInstruction *
RawBufferHandler::BufferAddress::getWordIndex(SourceLocation loc,
SourceRange range) {
if (!wordIndex.hasValue()) {
auto *constUint2 =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 2));
// Divide the byte index by 4 (shift right by 2) to get the index in the
// word-sized buffer.
wordIndex = spvBuilder.createBinaryOp(spv::Op::OpShiftRightLogical,
astContext.UnsignedIntTy, byteAddress,
constUint2, loc, range);
}
return wordIndex.getValue();
}
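// Advancing by an arbitrary byte width invalidates the cached word index; it
// is recomputed from the byte address on the next getWordIndex() call.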
void RawBufferHandler::BufferAddress::incrementByteAddress(
SpirvInstruction *width, SourceLocation loc, SourceRange range) {
byteAddress =
spvBuilder.createBinaryOp(spv::Op::OpIAdd, astContext.UnsignedIntTy,
byteAddress, width, loc, range);
wordIndex.reset();
}
void RawBufferHandler::BufferAddress::incrementByteAddress(uint32_t width,
SourceLocation loc,
SourceRange range) {
incrementByteAddress(spvBuilder.getConstantInt(astContext.UnsignedIntTy,
llvm::APInt(32, width)),
loc, range);
}
void RawBufferHandler::BufferAddress::incrementWordIndex(SourceLocation loc,
SourceRange range) {
auto *constUint1 =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 1));
auto *oldWordIndex = getWordIndex(loc, range);
// Keep the byte address up to date. If this is unneeded, the optimizer will
// remove it.
incrementByteAddress(4, loc, range);
wordIndex =
spvBuilder.createBinaryOp(spv::Op::OpIAdd, astContext.UnsignedIntTy,
oldWordIndex, constUint1, loc, range);
}
} // namespace spirv
} // namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/AstTypeProbe.cpp | //===--- TypeProbe.cpp - Static functions for probing QualType ---*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/SPIRV/AstTypeProbe.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/HlslTypes.h"
namespace {
template <unsigned N>
clang::DiagnosticBuilder emitError(const clang::ASTContext &astContext,
const char (&message)[N],
clang::SourceLocation srcLoc = {}) {
const auto diagId = astContext.getDiagnostics().getCustomDiagID(
clang::DiagnosticsEngine::Error, message);
return astContext.getDiagnostics().Report(srcLoc, diagId);
}
} // namespace
namespace clang {
namespace spirv {
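// Returns the name of the function, or a mangled form for overloaded
// operators. For example, Foo::operator+ yields "operator.Plus", or
// "Foo.operator.Plus" when addClassNameWithOperator is true.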
std::string getFunctionOrOperatorName(const FunctionDecl *fn,
bool addClassNameWithOperator) {
auto operatorKind = fn->getOverloadedOperator();
if (operatorKind == OO_None)
return fn->getNameAsString();
if (const auto *cxxMethodDecl = dyn_cast<CXXMethodDecl>(fn)) {
std::string prefix =
addClassNameWithOperator
? cxxMethodDecl->getParent()->getNameAsString() + "."
: "";
switch (operatorKind) {
#ifdef OVERLOADED_OPERATOR
#undef OVERLOADED_OPERATOR
#endif
#define OVERLOADED_OPERATOR(Name, Spelling, Token, Unary, Binary, MemberOnly) \
case OO_##Name: \
return prefix + "operator." #Name;
#include "clang/Basic/OperatorKinds.def"
default:
break;
}
}
llvm_unreachable("unknown overloaded operator type");
}
std::string getAstTypeName(QualType type) {
{
QualType ty = {};
if (isScalarType(type, &ty))
if (const auto *builtinType = ty->getAs<BuiltinType>())
switch (builtinType->getKind()) {
case BuiltinType::Void:
return "void";
case BuiltinType::Bool:
return "bool";
case BuiltinType::Int:
return "int";
case BuiltinType::UInt:
return "uint";
case BuiltinType::Float:
return "float";
case BuiltinType::Double:
return "double";
case BuiltinType::LongLong:
return "int64";
case BuiltinType::ULongLong:
return "uint64";
case BuiltinType::Short:
return "short";
case BuiltinType::UShort:
return "ushort";
case BuiltinType::Half:
case BuiltinType::HalfFloat:
return "half";
case BuiltinType::Min12Int:
return "min12int";
case BuiltinType::Min16Int:
return "min16int";
case BuiltinType::Min16UInt:
return "min16uint";
case BuiltinType::Min16Float:
return "min16float";
case BuiltinType::Min10Float:
return "min10float";
default:
return "";
}
}
{
QualType elemType = {};
uint32_t elemCount = {};
if (isVectorType(type, &elemType, &elemCount))
return "v" + std::to_string(elemCount) + getAstTypeName(elemType);
}
{
QualType elemType = {};
uint32_t rowCount = 0, colCount = 0;
if (isMxNMatrix(type, &elemType, &rowCount, &colCount))
return "mat" + std::to_string(rowCount) + "v" + std::to_string(colCount) +
getAstTypeName(elemType);
}
if (const auto *structType = type->getAs<RecordType>())
return structType->getDecl()->getName();
return "";
}
bool isScalarType(QualType type, QualType *scalarType) {
bool isScalar = false;
QualType ty = {};
if (type->isBuiltinType() || isEnumType(type)) {
isScalar = true;
ty = type;
} else if (hlsl::IsHLSLVecType(type) && hlsl::GetHLSLVecSize(type) == 1) {
isScalar = true;
ty = hlsl::GetHLSLVecElementType(type);
} else if (const auto *extVecType =
dyn_cast<ExtVectorType>(type.getTypePtr())) {
if (extVecType->getNumElements() == 1) {
isScalar = true;
ty = extVecType->getElementType();
}
} else if (is1x1Matrix(type)) {
isScalar = true;
ty = hlsl::GetHLSLMatElementType(type);
}
if (isScalar && scalarType)
*scalarType = ty;
return isScalar;
}
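// Note that 1xN and Mx1 HLSL matrices also count as vectors here (e.g.
// float1x3 is a 3-element float vector), whereas one-element vectors such as
// float1 are classified as scalars by isScalarType instead.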
bool isVectorType(QualType type, QualType *elemType, uint32_t *elemCount) {
bool isVec = false;
QualType ty = {};
uint32_t count = 0;
if (hlsl::IsHLSLVecType(type)) {
ty = hlsl::GetHLSLVecElementType(type);
count = hlsl::GetHLSLVecSize(type);
isVec = count > 1;
} else if (const auto *extVecType =
dyn_cast<ExtVectorType>(type.getTypePtr())) {
ty = extVecType->getElementType();
count = extVecType->getNumElements();
isVec = count > 1;
} else if (hlsl::IsHLSLMatType(type)) {
uint32_t rowCount = 0, colCount = 0;
hlsl::GetHLSLMatRowColCount(type, rowCount, colCount);
ty = hlsl::GetHLSLMatElementType(type);
count = rowCount == 1 ? colCount : rowCount;
isVec = (rowCount == 1) != (colCount == 1);
}
if (isVec) {
if (elemType)
*elemType = ty;
if (elemCount)
*elemCount = count;
}
return isVec;
}
bool isScalarOrVectorType(QualType type, QualType *elemType,
uint32_t *elemCount) {
if (isScalarType(type, elemType)) {
if (elemCount)
*elemCount = 1;
return true;
}
return isVectorType(type, elemType, elemCount);
}
bool isConstantArrayType(const ASTContext &astContext, QualType type) {
return astContext.getAsConstantArrayType(type) != nullptr;
}
bool isEnumType(QualType type) {
if (isa<EnumType>(type.getTypePtr()))
return true;
if (const auto *elaboratedType = type->getAs<ElaboratedType>())
if (isa<EnumType>(elaboratedType->desugar().getTypePtr()))
return true;
return false;
}
bool is1x1Matrix(QualType type, QualType *elemType) {
if (!hlsl::IsHLSLMatType(type))
return false;
uint32_t rowCount = 0, colCount = 0;
hlsl::GetHLSLMatRowColCount(type, rowCount, colCount);
if (rowCount == 1 && colCount == 1) {
if (elemType)
*elemType = hlsl::GetHLSLMatElementType(type);
return true;
}
return false;
}
bool is1xNMatrix(QualType type, QualType *elemType, uint32_t *elemCount) {
if (!hlsl::IsHLSLMatType(type))
return false;
uint32_t rowCount = 0, colCount = 0;
hlsl::GetHLSLMatRowColCount(type, rowCount, colCount);
if (rowCount == 1 && colCount > 1) {
if (elemType)
*elemType = hlsl::GetHLSLMatElementType(type);
if (elemCount)
*elemCount = colCount;
return true;
}
return false;
}
bool isMx1Matrix(QualType type, QualType *elemType, uint32_t *elemCount) {
if (!hlsl::IsHLSLMatType(type))
return false;
uint32_t rowCount = 0, colCount = 0;
hlsl::GetHLSLMatRowColCount(type, rowCount, colCount);
if (rowCount > 1 && colCount == 1) {
if (elemType)
*elemType = hlsl::GetHLSLMatElementType(type);
if (elemCount)
*elemCount = rowCount;
return true;
}
return false;
}
bool isMxNMatrix(QualType type, QualType *elemType, uint32_t *numRows,
uint32_t *numCols) {
if (!hlsl::IsHLSLMatType(type))
return false;
uint32_t rowCount = 0, colCount = 0;
hlsl::GetHLSLMatRowColCount(type, rowCount, colCount);
if (rowCount > 1 && colCount > 1) {
if (elemType)
*elemType = hlsl::GetHLSLMatElementType(type);
if (numRows)
*numRows = rowCount;
if (numCols)
*numCols = colCount;
return true;
}
return false;
}
bool isInputPatch(QualType type) {
if (const auto *rt = type->getAs<RecordType>())
return rt->getDecl()->getName() == "InputPatch";
return false;
}
bool isOutputPatch(QualType type) {
if (const auto *rt = type->getAs<RecordType>())
return rt->getDecl()->getName() == "OutputPatch";
return false;
}
bool isSubpassInput(QualType type) {
if (const auto *rt = type->getAs<RecordType>())
return rt->getDecl()->getName() == "SubpassInput";
return false;
}
bool isSubpassInputMS(QualType type) {
if (const auto *rt = type->getAs<RecordType>())
return rt->getDecl()->getName() == "SubpassInputMS";
return false;
}
bool isArrayType(QualType type, QualType *elemType, uint32_t *elemCount) {
if (const auto *arrayType = type->getAsArrayTypeUnsafe()) {
if (elemType)
*elemType = arrayType->getElementType();
if (elemCount)
*elemCount = hlsl::GetArraySize(type);
return true;
}
return false;
}
bool isConstantBuffer(clang::QualType type) {
// Strip outer arrayness first
while (type->isArrayType())
type = type->getAsArrayTypeUnsafe()->getElementType();
if (const RecordType *RT = type->getAs<RecordType>()) {
StringRef name = RT->getDecl()->getName();
return name == "ConstantBuffer";
}
return false;
}
bool isTextureBuffer(clang::QualType type) {
// Strip outer arrayness first
while (type->isArrayType())
type = type->getAsArrayTypeUnsafe()->getElementType();
if (const RecordType *RT = type->getAs<RecordType>()) {
StringRef name = RT->getDecl()->getName();
return name == "TextureBuffer";
}
return false;
}
bool isConstantTextureBuffer(QualType type) {
return isConstantBuffer(type) || isTextureBuffer(type);
}
bool isResourceType(QualType type) {
// Deprive the arrayness to see the element type
while (type->isArrayType()) {
type = type->getAsArrayTypeUnsafe()->getElementType();
}
if (isSubpassInput(type) || isSubpassInputMS(type) || isInputPatch(type) ||
isOutputPatch(type))
return true;
return hlsl::IsHLSLResourceType(type);
}
bool isOrContains16BitType(QualType type, bool enable16BitTypesOption) {
// Primitive types
{
QualType ty = {};
if (isScalarType(type, &ty)) {
if (const auto *builtinType = ty->getAs<BuiltinType>()) {
switch (builtinType->getKind()) {
case BuiltinType::Min12Int:
case BuiltinType::Min16Int:
case BuiltinType::Min16UInt:
case BuiltinType::Min10Float:
case BuiltinType::Min16Float:
return enable16BitTypesOption;
// The 'Half' enum always represents 16-bit floats and 'HalfFloat' always
// represents 32-bit floats.
// int16_t and uint16_t map to Short and UShort.
case BuiltinType::Short:
case BuiltinType::UShort:
case BuiltinType::Half:
return true;
default:
return false;
}
}
}
}
// Vector types
{
QualType elemType = {};
if (isVectorType(type, &elemType))
return isOrContains16BitType(elemType, enable16BitTypesOption);
}
// Matrix types
{
QualType elemType = {};
if (isMxNMatrix(type, &elemType)) {
return isOrContains16BitType(elemType, enable16BitTypesOption);
}
}
// Struct type
if (const auto *structType = type->getAs<RecordType>()) {
const auto *decl = structType->getDecl();
for (const auto *field : decl->fields()) {
if (isOrContains16BitType(field->getType(), enable16BitTypesOption))
return true;
}
return false;
}
// Array type
if (const auto *arrayType = type->getAsArrayTypeUnsafe()) {
return isOrContains16BitType(arrayType->getElementType(),
enable16BitTypesOption);
}
// Reference types
if (const auto *refType = type->getAs<ReferenceType>()) {
return isOrContains16BitType(refType->getPointeeType(),
enable16BitTypesOption);
}
// Pointer types
if (const auto *ptrType = type->getAs<PointerType>()) {
return isOrContains16BitType(ptrType->getPointeeType(),
enable16BitTypesOption);
}
if (const auto *typedefType = type->getAs<TypedefType>()) {
return isOrContains16BitType(typedefType->desugar(),
enable16BitTypesOption);
}
llvm_unreachable("checking 16-bit type unimplemented");
return false;
}
uint32_t getElementSpirvBitwidth(const ASTContext &astContext, QualType type,
bool is16BitTypeEnabled) {
const auto canonicalType = type.getCanonicalType();
if (canonicalType != type)
return getElementSpirvBitwidth(astContext, canonicalType,
is16BitTypeEnabled);
// Vector types
{
QualType elemType = {};
if (isVectorType(type, &elemType))
return getElementSpirvBitwidth(astContext, elemType, is16BitTypeEnabled);
}
// Matrix types
if (hlsl::IsHLSLMatType(type))
return getElementSpirvBitwidth(
astContext, hlsl::GetHLSLMatElementType(type), is16BitTypeEnabled);
// Array types
if (const auto *arrayType = type->getAsArrayTypeUnsafe()) {
return getElementSpirvBitwidth(astContext, arrayType->getElementType(),
is16BitTypeEnabled);
}
// Typedefs
if (const auto *typedefType = type->getAs<TypedefType>())
return getElementSpirvBitwidth(astContext, typedefType->desugar(),
is16BitTypeEnabled);
// Reference types
if (const auto *refType = type->getAs<ReferenceType>())
return getElementSpirvBitwidth(astContext, refType->getPointeeType(),
is16BitTypeEnabled);
// Pointer types
if (const auto *ptrType = type->getAs<PointerType>())
return getElementSpirvBitwidth(astContext, ptrType->getPointeeType(),
is16BitTypeEnabled);
// Enum types
if (isEnumType(type))
return 32;
// Scalar types
QualType ty = {};
const bool isScalar = isScalarType(type, &ty);
assert(isScalar);
(void)isScalar;
if (const auto *builtinType = ty->getAs<BuiltinType>()) {
switch (builtinType->getKind()) {
case BuiltinType::Bool:
case BuiltinType::Int:
case BuiltinType::UInt:
case BuiltinType::Int8_4Packed:
case BuiltinType::UInt8_4Packed:
case BuiltinType::Float:
case BuiltinType::Long:
case BuiltinType::ULong:
return 32;
case BuiltinType::Double:
case BuiltinType::LongLong:
case BuiltinType::ULongLong:
return 64;
// The Half builtin type is always 16-bit. The HLSL 'half' keyword is
// translated to the 'Half' enum if -enable-16bit-types is true.
// int16_t and uint16_t map to Short and UShort.
case BuiltinType::Half:
case BuiltinType::Short:
case BuiltinType::UShort:
return 16;
// HalfFloat builtin type is just an alias for Float builtin type and is
// always 32-bit. The HLSL 'half' keyword is translated to 'HalfFloat' enum
// if -enable-16bit-types is false.
case BuiltinType::HalfFloat:
return 32;
case BuiltinType::UChar:
case BuiltinType::Char_U:
case BuiltinType::SChar:
case BuiltinType::Char_S:
return 8;
// The following types are treated as 16-bit if the '-enable-16bit-types'
// option is enabled. They are treated as 32-bit otherwise.
case BuiltinType::Min12Int:
case BuiltinType::Min16Int:
case BuiltinType::Min16UInt:
case BuiltinType::Min16Float:
case BuiltinType::Min10Float: {
return is16BitTypeEnabled ? 16 : 32;
}
case BuiltinType::LitFloat: {
return 64;
}
case BuiltinType::LitInt: {
return 64;
}
default:
// Other builtin types are either not relevant to bitcount or not in HLSL.
break;
}
}
llvm_unreachable("invalid type passed to getElementSpirvBitwidth");
}
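// Returns true if the two types can be treated as the same scalar type for
// codegen purposes. For example, 'const float' and 'float' compare equal
// here, as do 'uint' and 'uint8_t4_packed', and 'literal float' matches any
// floating-point type.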
bool canTreatAsSameScalarType(QualType type1, QualType type2) {
// Treat const int/float the same as const int/float
type1.removeLocalConst();
type2.removeLocalConst();
return (type1.getCanonicalType() == type2.getCanonicalType()) ||
// Treat uint8_t4_packed and int8_t4_packed as the same because they
// are both represented as 32-bit unsigned integers in SPIR-V.
(type1->isSpecificBuiltinType(BuiltinType::Int8_4Packed) &&
type2->isSpecificBuiltinType(BuiltinType::UInt8_4Packed)) ||
(type2->isSpecificBuiltinType(BuiltinType::Int8_4Packed) &&
type1->isSpecificBuiltinType(BuiltinType::UInt8_4Packed)) ||
// Treat uint8_t4_packed and uint32_t as the same because they
// are both represented as 32-bit unsigned integers in SPIR-V.
(type1->isSpecificBuiltinType(BuiltinType::UInt) &&
type2->isSpecificBuiltinType(BuiltinType::UInt8_4Packed)) ||
(type2->isSpecificBuiltinType(BuiltinType::UInt) &&
type1->isSpecificBuiltinType(BuiltinType::UInt8_4Packed)) ||
// Treat int8_t4_packed and uint32_t as the same because they
// are both represented as 32-bit unsigned integers in SPIR-V.
(type1->isSpecificBuiltinType(BuiltinType::UInt) &&
type2->isSpecificBuiltinType(BuiltinType::Int8_4Packed)) ||
(type2->isSpecificBuiltinType(BuiltinType::UInt) &&
type1->isSpecificBuiltinType(BuiltinType::Int8_4Packed)) ||
// Treat 'literal float' and 'float' as the same
(type1->isSpecificBuiltinType(BuiltinType::LitFloat) &&
type2->isFloatingType()) ||
(type2->isSpecificBuiltinType(BuiltinType::LitFloat) &&
type1->isFloatingType());
}
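// For example, struct { float2 xy; float z; } fits into one register as
// three floats, whereas struct { float x; int y; } is rejected (with a
// diagnostic) because its members have different element types.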
bool canFitIntoOneRegister(const ASTContext &astContext, QualType structType,
QualType *elemType, uint32_t *elemCount) {
if (structType->getAsStructureType() == nullptr)
return false;
const auto *structDecl = structType->getAsStructureType()->getDecl();
QualType firstElemType;
uint32_t totalCount = 0;
for (const auto *field : structDecl->fields()) {
QualType type;
uint32_t count = 1;
if (isScalarType(field->getType(), &type) ||
isVectorType(field->getType(), &type, &count)) {
if (firstElemType.isNull()) {
firstElemType = type;
} else {
if (!canTreatAsSameScalarType(firstElemType, type)) {
emitError(astContext,
"all struct members should have the same element type for "
"resource template instantiation",
structDecl->getLocation());
return false;
}
}
totalCount += count;
} else {
emitError(
astContext,
"unsupported struct element type for resource template instantiation",
structDecl->getLocation());
return false;
}
}
if (totalCount > 4) {
emitError(
astContext,
"resource template element type %0 cannot fit into four 32-bit scalars",
structDecl->getLocation())
<< structType;
return false;
}
if (elemType)
*elemType = firstElemType;
if (elemCount)
*elemCount = totalCount;
return true;
}
QualType getElementType(const ASTContext &astContext, QualType type) {
QualType elemType = {};
if (isScalarType(type, &elemType) || isVectorType(type, &elemType) ||
isMxNMatrix(type, &elemType) ||
canFitIntoOneRegister(astContext, type, &elemType)) {
return elemType;
}
if (const auto *arrType = dyn_cast<ConstantArrayType>(type)) {
return arrType->getElementType();
}
assert(false && "unsupported resource type parameter");
return type;
}
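// For example, (float, 16) yields 'half', (int2, 64) yields a 2-element
// vector of 'long long', and (uint, 32) yields 'uint' itself.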
QualType getTypeWithCustomBitwidth(const ASTContext &ctx, QualType type,
uint32_t bitwidth) {
// Cases where the given type is a vector of float/int.
{
QualType elemType = {};
uint32_t elemCount = 0;
const bool isVec = isVectorType(type, &elemType, &elemCount);
if (isVec) {
return ctx.getExtVectorType(
getTypeWithCustomBitwidth(ctx, elemType, bitwidth), elemCount);
}
}
// It could be a vector of size 1, which is treated as a scalar.
if (hlsl::IsHLSLVecType(type)) {
assert(hlsl::GetHLSLVecSize(type) == 1);
type = hlsl::GetHLSLVecElementType(type);
}
// Scalar cases.
assert(!type->isBooleanType());
assert(type->isIntegerType() || type->isFloatingType());
if (type->isFloatingType()) {
switch (bitwidth) {
case 16:
return ctx.HalfTy;
case 32:
return ctx.FloatTy;
case 64:
return ctx.DoubleTy;
}
}
if (type->isSignedIntegerType()) {
switch (bitwidth) {
case 16:
return ctx.ShortTy;
case 32:
return ctx.IntTy;
case 64:
return ctx.LongLongTy;
}
}
if (type->isUnsignedIntegerType()) {
switch (bitwidth) {
case 16:
return ctx.UnsignedShortTy;
case 32:
return ctx.UnsignedIntTy;
case 64:
return ctx.UnsignedLongLongTy;
}
}
llvm_unreachable(
"invalid type or bitwidth passed to getTypeWithCustomBitwidth");
}
bool isMatrixOrArrayOfMatrix(const ASTContext &context, QualType type) {
if (isMxNMatrix(type)) {
return true;
}
if (const auto *arrayType = context.getAsArrayType(type))
return isMatrixOrArrayOfMatrix(context, arrayType->getElementType());
return false;
}
bool isLitTypeOrVecOfLitType(QualType type) {
if (type == QualType())
return false;
if (type->isSpecificBuiltinType(BuiltinType::LitInt) ||
type->isSpecificBuiltinType(BuiltinType::LitFloat))
return true;
// For vector cases
{
QualType elemType = {};
uint32_t elemCount = 0;
if (isVectorType(type, &elemType, &elemCount))
return isLitTypeOrVecOfLitType(elemType);
}
return false;
}
bool isSameScalarOrVecType(QualType type1, QualType type2) {
{ // Scalar types
QualType scalarType1 = {}, scalarType2 = {};
if (isScalarType(type1, &scalarType1) && isScalarType(type2, &scalarType2))
return canTreatAsSameScalarType(scalarType1, scalarType2);
}
{ // Vector types
QualType elemType1 = {}, elemType2 = {};
uint32_t count1 = {}, count2 = {};
if (!isVectorType(type1, &elemType1, &count1) ||
!isVectorType(type2, &elemType2, &count2))
return false;
if (count1 != count2)
return false;
// That's a corner case we had to add to solve #4727.
// Normally, clang doesn't have the 'literal type', thus we can rely on a
// direct type check. But this flavor of the AST has a 'literal int' type
// that is sign-less (neither signed nor unsigned) until usage. Obviously,
// int(3) == literal int (3), but since they are considered different in the
// AST, we must check explicitly. Note: this is only valid here, as this is
// related to a vector size. Considering int == literal int elsewhere could
// break codegen, as SPIR-V does need explicit signedness.
return canTreatAsSameScalarType(elemType1, elemType2) ||
(elemType1->isIntegerType() &&
elemType2->isSpecificBuiltinType(BuiltinType::LitInt)) ||
(elemType2->isIntegerType() &&
elemType1->isSpecificBuiltinType(BuiltinType::LitInt));
}
return false;
}
bool isSameType(const ASTContext &astContext, QualType type1, QualType type2) {
if (isSameScalarOrVecType(type1, type2))
return true;
type1.removeLocalConst();
type2.removeLocalConst();
{ // Matrix types
QualType elemType1 = {}, elemType2 = {};
uint32_t row1 = 0, row2 = 0, col1 = 0, col2 = 0;
if (isMxNMatrix(type1, &elemType1, &row1, &col1) &&
isMxNMatrix(type2, &elemType2, &row2, &col2))
return row1 == row2 && col1 == col2 &&
canTreatAsSameScalarType(elemType1, elemType2);
}
{ // Array types
if (const auto *arrType1 = astContext.getAsConstantArrayType(type1))
if (const auto *arrType2 = astContext.getAsConstantArrayType(type2))
return hlsl::GetArraySize(type1) == hlsl::GetArraySize(type2) &&
isSameType(astContext, arrType1->getElementType(),
arrType2->getElementType());
}
{ // Two structures with identical fields
if (const auto *structType1 = type1->getAs<RecordType>()) {
if (const auto *structType2 = type2->getAs<RecordType>()) {
llvm::SmallVector<QualType, 4> fieldTypes1;
llvm::SmallVector<QualType, 4> fieldTypes2;
for (const auto *field : structType1->getDecl()->fields())
fieldTypes1.push_back(field->getType());
for (const auto *field : structType2->getDecl()->fields())
fieldTypes2.push_back(field->getType());
// Note: We currently do NOT consider such cases as equal types:
// struct s1 { int x; int y; }
// struct s2 { int2 x; }
// Therefore if two structs have different number of members, we
// consider them different.
if (fieldTypes1.size() != fieldTypes2.size())
return false;
for (size_t i = 0; i < fieldTypes1.size(); ++i)
if (!isSameType(astContext, fieldTypes1[i], fieldTypes2[i]))
return false;
return true;
}
}
}
// TODO: support other types if needed
return false;
}
QualType desugarType(QualType type, llvm::Optional<bool> *isRowMajor) {
if (const auto *attrType = type->getAs<AttributedType>()) {
switch (auto kind = attrType->getAttrKind()) {
// HLSL row-major is SPIR-V col-major
case AttributedType::attr_hlsl_row_major:
*isRowMajor = false;
break;
// HLSL col-major is SPIR-V row-major
case AttributedType::attr_hlsl_column_major:
*isRowMajor = true;
break;
default:
// Only looking for matrix majorness attributes.
break;
}
return desugarType(attrType->getLocallyUnqualifiedSingleStepDesugaredType(),
isRowMajor);
}
if (const auto *typedefType = type->getAs<TypedefType>()) {
return desugarType(typedefType->desugar(), isRowMajor);
}
return type;
}
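// Returns whether the matrix type is row major in SPIR-V terms. Note the
// flip from HLSL: e.g. an HLSL 'row_major float2x3' reports false here
// because HLSL row-major maps to SPIR-V column-major.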
bool isRowMajorMatrix(const SpirvCodeGenOptions &spvOptions, QualType type) {
// SPIR-V row-major is HLSL col-major and SPIR-V col-major is HLSL row-major.
bool attrRowMajor = false;
if (hlsl::HasHLSLMatOrientation(type, &attrRowMajor))
return !attrRowMajor;
// If it is a templated type the attribute may have been applied to the
// underlying type. For example: StructuredBuffer<row_major float2x3>
if (const auto *tst = dyn_cast<clang::TemplateSpecializationType>(type)) {
if (tst->getNumArgs() >= 1) {
auto args = tst->getArgs();
auto templateArgument = args[0];
auto templateArgumentType = templateArgument.getAsType();
return isRowMajorMatrix(spvOptions, templateArgumentType);
}
}
return !spvOptions.defaultRowMajor;
}
bool isStructuredBuffer(QualType type) {
const auto *recordType = type->getAs<RecordType>();
if (!recordType)
return false;
const auto name = recordType->getDecl()->getName();
return name == "StructuredBuffer" || name == "RWStructuredBuffer" ||
name == "RasterizerOrderedStructuredBuffer";
}
bool isNonWritableStructuredBuffer(QualType type) {
const auto *recordType = type->getAs<RecordType>();
if (!recordType)
return false;
const auto name = recordType->getDecl()->getName();
return name == "StructuredBuffer";
}
bool isByteAddressBuffer(QualType type) {
if (const auto *rt = type->getAs<RecordType>()) {
return rt->getDecl()->getName() == "ByteAddressBuffer";
}
return false;
}
bool isRWBuffer(QualType type) {
if (const auto *rt = type->getAs<RecordType>()) {
const auto name = rt->getDecl()->getName();
return name == "RWBuffer" || name == "RasterizerOrderedBuffer";
}
return false;
}
bool isBuffer(QualType type) {
if (const auto *rt = type->getAs<RecordType>()) {
return rt->getDecl()->getName() == "Buffer";
}
return false;
}
bool isRWTexture(QualType type) {
if (const auto *rt = type->getAs<RecordType>()) {
const auto name = rt->getDecl()->getName();
if (name == "RWTexture1D" || name == "RWTexture1DArray" ||
name == "RWTexture2D" || name == "RWTexture2DArray" ||
name == "RWTexture3D" || name == "RasterizerOrderedTexture1D" ||
name == "RasterizerOrderedTexture1DArray" ||
name == "RasterizerOrderedTexture2D" ||
name == "RasterizerOrderedTexture2DArray" ||
name == "RasterizerOrderedTexture3D")
return true;
}
return false;
}
bool isTexture(QualType type) {
if (const auto *rt = type->getAs<RecordType>()) {
const auto name = rt->getDecl()->getName();
if (name == "Texture1D" || name == "Texture1DArray" ||
name == "Texture2D" || name == "Texture2DArray" ||
name == "Texture2DMS" || name == "Texture2DMSArray" ||
name == "TextureCube" || name == "TextureCubeArray" ||
name == "Texture3D")
return true;
}
return false;
}
bool isTextureMS(QualType type) {
if (const auto *rt = type->getAs<RecordType>()) {
const auto name = rt->getDecl()->getName();
if (name == "Texture2DMS" || name == "Texture2DMSArray")
return true;
}
return false;
}
bool isSampler(QualType type) {
if (const auto *rt = type->getAs<RecordType>()) {
const auto name = rt->getDecl()->getName();
if (name == "SamplerState" || name == "SamplerComparisonState")
return true;
}
return false;
}
bool isRWByteAddressBuffer(QualType type) {
if (const auto *rt = type->getAs<RecordType>()) {
const auto name = rt->getDecl()->getName();
return name == "RWByteAddressBuffer" ||
name == "RasterizerOrderedByteAddressBuffer";
}
return false;
}
bool isAppendStructuredBuffer(QualType type) {
// Strip outer arrayness first
while (type->isArrayType())
type = type->getAsArrayTypeUnsafe()->getElementType();
const auto *recordType = type->getAs<RecordType>();
if (!recordType)
return false;
const auto name = recordType->getDecl()->getName();
return name == "AppendStructuredBuffer";
}
bool isConsumeStructuredBuffer(QualType type) {
// Strip outer arrayness first
while (type->isArrayType())
type = type->getAsArrayTypeUnsafe()->getElementType();
const auto *recordType = type->getAs<RecordType>();
if (!recordType)
return false;
const auto name = recordType->getDecl()->getName();
return name == "ConsumeStructuredBuffer";
}
bool isRWStructuredBuffer(QualType type) {
// Strip outer arrayness first
while (type->isArrayType())
type = type->getAsArrayTypeUnsafe()->getElementType();
if (const RecordType *recordType = type->getAs<RecordType>()) {
StringRef name = recordType->getDecl()->getName();
return name == "RWStructuredBuffer" ||
name == "RasterizerOrderedStructuredBuffer";
}
return false;
}
bool isRWAppendConsumeSBuffer(QualType type) {
return isRWStructuredBuffer(type) || isConsumeStructuredBuffer(type) ||
isAppendStructuredBuffer(type);
}
bool isResourceDescriptorHeap(QualType type) {
if (const auto *rt = type->getAs<RecordType>()) {
return rt->getDecl()->getName() == ".Resource";
}
return false;
}
bool isSamplerDescriptorHeap(QualType type) {
if (const auto *rt = type->getAs<RecordType>()) {
return rt->getDecl()->getName() == ".Sampler";
}
return false;
}
bool isAKindOfStructuredOrByteBuffer(QualType type) {
// Strip outer arrayness first
while (type->isArrayType())
type = type->getAsArrayTypeUnsafe()->getElementType();
if (const RecordType *recordType = type->getAs<RecordType>()) {
StringRef name = recordType->getDecl()->getName();
return name == "StructuredBuffer" || name == "RWStructuredBuffer" ||
name == "RasterizerOrderedStructuredBuffer" ||
name == "ByteAddressBuffer" || name == "RWByteAddressBuffer" ||
name == "RasterizerOrderedByteAddressBuffer" ||
name == "AppendStructuredBuffer" ||
name == "ConsumeStructuredBuffer" || name == ".Resource" ||
name == ".Sampler";
}
return false;
}
bool isOrContainsAKindOfStructuredOrByteBuffer(QualType type) {
while (type->isArrayType())
type = type->getAsArrayTypeUnsafe()->getElementType();
if (const RecordType *recordType = type->getAs<RecordType>()) {
StringRef name = recordType->getDecl()->getName();
if (name == "StructuredBuffer" || name == "RWStructuredBuffer" ||
name == "RasterizerOrderedStructuredBuffer" ||
name == "ByteAddressBuffer" || name == "RWByteAddressBuffer" ||
name == "RasterizerOrderedByteAddressBuffer" ||
name == "AppendStructuredBuffer" || name == "ConsumeStructuredBuffer" ||
name == ".Resource" || name == ".Sampler")
return true;
for (const auto *field : recordType->getDecl()->fields()) {
if (isOrContainsAKindOfStructuredOrByteBuffer(field->getType()))
return true;
}
if (const auto *cxxDecl = type->getAsCXXRecordDecl()) {
for (const auto &base : cxxDecl->bases()) {
if (isOrContainsAKindOfStructuredOrByteBuffer(base.getType())) {
return true;
}
}
}
}
return false;
}
bool isOpaqueType(QualType type) {
if (const auto *recordType = type->getAs<RecordType>()) {
const auto name = recordType->getDecl()->getName();
if (name == "Texture1D" || name == "RWTexture1D" ||
name == "RasterizerOrderedTexture1D")
return true;
if (name == "Texture2D" || name == "RWTexture2D" ||
name == "RasterizerOrderedTexture2D")
return true;
if (name == "Texture2DMS" || name == "RWTexture2DMS")
return true;
if (name == "Texture3D" || name == "RWTexture3D" ||
name == "RasterizerOrderedTexture3D")
return true;
if (name == "TextureCube" || name == "RWTextureCube")
return true;
if (name == "Texture1DArray" || name == "RWTexture1DArray" ||
name == "RasterizerOrderedTexture1DArray")
return true;
if (name == "Texture2DArray" || name == "RWTexture2DArray" ||
name == "RasterizerOrderedTexture2DArray")
return true;
if (name == "Texture2DMSArray" || name == "RWTexture2DMSArray")
return true;
if (name == "TextureCubeArray" || name == "RWTextureCubeArray")
return true;
if (name == "Buffer" || name == "RWBuffer" ||
name == "RasterizerOrderedBuffer")
return true;
if (name == "SamplerState" || name == "SamplerComparisonState")
return true;
if (name == "RaytracingAccelerationStructure")
return true;
if (name == "RayQuery")
return true;
}
return false;
}
std::string getHlslResourceTypeName(QualType type) {
if (type.isNull())
return "";
// Strip outer arrayness first
while (type->isArrayType())
type = type->getAsArrayTypeUnsafe()->getElementType();
if (const RecordType *recordType = type->getAs<RecordType>()) {
StringRef name = recordType->getDecl()->getName();
if (name == "StructuredBuffer" || name == "RWStructuredBuffer" ||
name == "RasterizerOrderedStructuredBuffer" ||
name == "ByteAddressBuffer" || name == "RWByteAddressBuffer" ||
name == "RasterizerOrderedByteAddressBuffer" ||
name == "AppendStructuredBuffer" || name == "ConsumeStructuredBuffer" ||
name == "Texture1D" || name == "Texture2D" || name == "Texture3D" ||
name == "TextureCube" || name == "Texture1DArray" ||
name == "Texture2DArray" || name == "Texture2DMS" ||
name == "Texture2DMSArray" || name == "TextureCubeArray" ||
name == "RWTexture1D" || name == "RWTexture2D" ||
name == "RWTexture3D" || name == "RWTexture1DArray" ||
name == "RWTexture2DArray" || name == "RasterizerOrderedTexture1D" ||
name == "RasterizerOrderedTexture1DArray" ||
name == "RasterizerOrderedTexture2D" ||
name == "RasterizerOrderedTexture2DArray" ||
name == "RasterizerOrderedTexture3D" || name == "Buffer" ||
name == "RWBuffer" || name == "RasterizerOrderedBuffer" ||
name == "SubpassInput" || name == "SubpassInputMS" ||
name == "InputPatch" || name == "OutputPatch" ||
name == "ConstantBuffer" || name == "TextureBuffer" ||
name == "RaytracingAccelerationStructure") {
// Get resource type name with template params. Operation is safe because
// type has already been null checked.
return type.getLocalUnqualifiedType().getAsString();
}
}
return "";
}
bool isOpaqueStructType(QualType type) {
if (isOpaqueType(type))
return false;
if (const auto *recordType = type->getAs<RecordType>())
for (const auto *field : recordType->getDecl()->decls())
if (const auto *fieldDecl = dyn_cast<FieldDecl>(field))
if (isOpaqueType(fieldDecl->getType()) ||
isOpaqueStructType(fieldDecl->getType()))
return true;
return false;
}
bool isOpaqueArrayType(QualType type) {
if (const auto *arrayType = type->getAsArrayTypeUnsafe())
return isOpaqueType(arrayType->getElementType());
return false;
}
bool isRelaxedPrecisionType(QualType type, const SpirvCodeGenOptions &opts) {
if (type.isNull())
return false;
// Primitive types
{
QualType ty = {};
if (isScalarType(type, &ty))
if (const auto *builtinType = ty->getAs<BuiltinType>())
switch (builtinType->getKind()) {
case BuiltinType::Min12Int:
case BuiltinType::Min16Int:
case BuiltinType::Min16UInt:
case BuiltinType::Min16Float:
case BuiltinType::Min10Float: {
// If the '-enable-16bit-types' option is enabled, these types are
// translated to real 16-bit types, and therefore are not
// RelaxedPrecision.
// If the option is not enabled, these types are translated to 32-bit
// types with the added RelaxedPrecision decoration.
return !opts.enable16BitTypes;
default:
// This switch is only interested in types eligible for RelaxedPrecision.
break;
}
}
}
// Vector, Matrix and Array types could use relaxed precision based on their
// element type.
{
QualType elemType = {};
if (isVectorType(type, &elemType) || isMxNMatrix(type, &elemType) ||
isArrayType(type, &elemType)) {
return isRelaxedPrecisionType(elemType, opts);
}
}
// Images with RelaxedPrecision sampled type.
if (const auto *recordType = type->getAs<RecordType>()) {
const llvm::StringRef name = recordType->getDecl()->getName();
if (name == "Texture1D" || name == "Texture2D" || name == "Texture3D" ||
name == "TextureCube" || name == "Texture1DArray" ||
name == "Texture2DArray" || name == "Texture2DMS" ||
name == "Texture2DMSArray" || name == "TextureCubeArray" ||
name == "RWTexture1D" || name == "RWTexture2D" ||
name == "RWTexture3D" || name == "RWTexture1DArray" ||
name == "RWTexture2DArray" || name == "RasterizerOrderedTexture1D" ||
name == "RasterizerOrderedTexture1DArray" ||
name == "RasterizerOrderedTexture2D" ||
name == "RasterizerOrderedTexture2DArray" ||
name == "RasterizerOrderedTexture3D" || name == "Buffer" ||
name == "RWBuffer" || name == "RasterizerOrderedBuffer" ||
name == "SubpassInput" || name == "SubpassInputMS") {
const auto sampledType = hlsl::GetHLSLResourceResultType(type);
return isRelaxedPrecisionType(sampledType, opts);
}
}
// Reference types
if (const auto *refType = type->getAs<ReferenceType>())
return isRelaxedPrecisionType(refType->getPointeeType(), opts);
// Pointer types
if (const auto *ptrType = type->getAs<PointerType>())
return isRelaxedPrecisionType(ptrType->getPointeeType(), opts);
return false;
}
bool isRasterizerOrderedView(QualType type) {
// Strip outer arrayness first
while (type->isArrayType())
type = type->getAsArrayTypeUnsafe()->getElementType();
if (const RecordType *recordType = type->getAs<RecordType>()) {
StringRef name = recordType->getDecl()->getName();
return name == "RasterizerOrderedBuffer" ||
name == "RasterizerOrderedByteAddressBuffer" ||
name == "RasterizerOrderedStructuredBuffer" ||
name == "RasterizerOrderedTexture1D" ||
name == "RasterizerOrderedTexture1DArray" ||
name == "RasterizerOrderedTexture2D" ||
name == "RasterizerOrderedTexture2DArray" ||
name == "RasterizerOrderedTexture3D";
}
return false;
}
/// Returns true if the given type is a bool or vector of bool type.
bool isBoolOrVecOfBoolType(QualType type) {
QualType elemType = {};
return (isScalarType(type, &elemType) || isVectorType(type, &elemType)) &&
elemType->isBooleanType();
}
/// Returns true if the given type is a signed integer or vector of signed
/// integer type.
bool isSintOrVecOfSintType(QualType type) {
if (isEnumType(type))
return true;
QualType elemType = {};
return (isScalarType(type, &elemType) || isVectorType(type, &elemType)) &&
elemType->isSignedIntegerType();
}
/// Returns true if the given type is an unsigned integer or vector of unsigned
/// integer type.
bool isUintOrVecOfUintType(QualType type) {
QualType elemType = {};
return (isScalarType(type, &elemType) || isVectorType(type, &elemType)) &&
elemType->isUnsignedIntegerType();
}
/// Returns true if the given type is a float or vector of float type.
bool isFloatOrVecOfFloatType(QualType type) {
QualType elemType = {};
return (isScalarType(type, &elemType) || isVectorType(type, &elemType)) &&
elemType->isFloatingType();
}
/// Returns true if the given type is a bool or vector/matrix of bool type.
bool isBoolOrVecMatOfBoolType(QualType type) {
return isBoolOrVecOfBoolType(type) ||
(hlsl::IsHLSLMatType(type) &&
hlsl::GetHLSLMatElementType(type)->isBooleanType());
}
/// Returns true if the given type is a signed integer or vector/matrix of
/// signed integer type.
bool isSintOrVecMatOfSintType(QualType type) {
return isSintOrVecOfSintType(type) ||
(hlsl::IsHLSLMatType(type) &&
hlsl::GetHLSLMatElementType(type)->isSignedIntegerType());
}
/// Returns true if the given type is an unsigned integer or vector/matrix of
/// unsigned integer type.
bool isUintOrVecMatOfUintType(QualType type) {
return isUintOrVecOfUintType(type) ||
(hlsl::IsHLSLMatType(type) &&
hlsl::GetHLSLMatElementType(type)->isUnsignedIntegerType());
}
/// Returns true if the given type is a float or vector/matrix of float type.
bool isFloatOrVecMatOfFloatType(QualType type) {
return isFloatOrVecOfFloatType(type) ||
(hlsl::IsHLSLMatType(type) &&
hlsl::GetHLSLMatElementType(type)->isFloatingType());
}
bool isOrContainsNonFpColMajorMatrix(const ASTContext &astContext,
const SpirvCodeGenOptions &spirvOptions,
QualType type, const Decl *decl) {
const auto isColMajorDecl = [&spirvOptions](QualType matTy) {
return !hlsl::IsHLSLMatRowMajor(matTy, spirvOptions.defaultRowMajor);
};
QualType elemType = {};
if (isMxNMatrix(type, &elemType) && !elemType->isFloatingType()) {
return isColMajorDecl(type);
}
if (const auto *arrayType = astContext.getAsConstantArrayType(type)) {
if (isMxNMatrix(arrayType->getElementType(), &elemType) &&
!elemType->isFloatingType())
return isColMajorDecl(arrayType->getElementType());
if (const auto *structType =
arrayType->getElementType()->getAs<RecordType>()) {
return isOrContainsNonFpColMajorMatrix(astContext, spirvOptions,
arrayType->getElementType(),
structType->getDecl());
}
}
if (const auto *structType = type->getAs<RecordType>()) {
const auto *decl = structType->getDecl();
for (const auto *field : decl->fields()) {
if (isOrContainsNonFpColMajorMatrix(astContext, spirvOptions,
field->getType(), field))
return true;
}
}
return false;
}
bool isTypeInVkNamespace(const RecordType *type) {
if (const auto *nameSpaceDecl =
dyn_cast<NamespaceDecl>(type->getDecl()->getDeclContext())) {
return nameSpaceDecl->getName() == "vk";
}
return false;
}
bool isExtResultIdType(QualType type) {
if (const auto *elaboratedType = type->getAs<ElaboratedType>()) {
if (const auto *recordType = elaboratedType->getAs<RecordType>()) {
if (!isTypeInVkNamespace(recordType))
return false;
return recordType->getDecl()->getName() == "ext_result_id";
}
}
return false;
}
bool isStringType(QualType type) {
return hlsl::IsStringType(type) || hlsl::IsStringLiteralType(type);
}
bool isBindlessOpaqueArray(QualType type) {
return !type.isNull() && isOpaqueArrayType(type) &&
!type->isConstantArrayType();
}
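// For example, the component (row) vector type of a float2x3 matrix is
// float3.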
QualType getComponentVectorType(const ASTContext &astContext,
QualType matrixType) {
assert(isMxNMatrix(matrixType));
const QualType elemType = hlsl::GetHLSLMatElementType(matrixType);
uint32_t rowCount = 0, colCount = 0;
hlsl::GetHLSLMatRowColCount(matrixType, rowCount, colCount);
return astContext.getExtVectorType(elemType, colCount);
}
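// Returns the QualType for the matrix<elemType, rows, columns> template
// specialization, reusing an existing specialization if one is found and
// creating and instantiating it otherwise.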
QualType getHLSLMatrixType(ASTContext &astContext, Sema &S,
ClassTemplateDecl *templateDecl, QualType elemType,
int rows, int columns) {
const SourceLocation noLoc;
TemplateArgument templateArgs[3] = {
TemplateArgument(elemType),
TemplateArgument(
astContext,
llvm::APSInt(
llvm::APInt(astContext.getIntWidth(astContext.IntTy), rows),
false),
astContext.IntTy),
TemplateArgument(
astContext,
llvm::APSInt(
llvm::APInt(astContext.getIntWidth(astContext.IntTy), columns),
false),
astContext.IntTy)};
SmallVector<TemplateArgument, 4> args;
args.push_back(templateArgs[0]);
args.push_back(templateArgs[1]);
args.push_back(templateArgs[2]);
DeclContext *currentDeclContext = astContext.getTranslationUnitDecl();
SmallVector<TemplateArgument, 3> templateArgsForDecl;
for (const TemplateArgument &Arg : templateArgs) {
if (Arg.getKind() == TemplateArgument::Type) {
// The class template needs to use the CanonicalType.
templateArgsForDecl.emplace_back(
TemplateArgument(Arg.getAsType().getCanonicalType()));
} else
templateArgsForDecl.emplace_back(Arg);
}
// First, try looking up existing specialization
void *insertPos = nullptr;
ClassTemplateSpecializationDecl *specializationDecl =
templateDecl->findSpecialization(templateArgsForDecl, insertPos);
if (specializationDecl) {
// Instantiate the class template if not done yet.
if (specializationDecl->getInstantiatedFrom().isNull()) {
S.InstantiateClassTemplateSpecialization(
noLoc, specializationDecl,
TemplateSpecializationKind::TSK_ImplicitInstantiation, true);
}
return astContext.getTemplateSpecializationType(
TemplateName(templateDecl), args.data(), args.size(),
astContext.getTypeDeclType(specializationDecl));
}
specializationDecl = ClassTemplateSpecializationDecl::Create(
astContext, TagDecl::TagKind::TTK_Class, currentDeclContext, noLoc, noLoc,
templateDecl, templateArgsForDecl.data(), templateArgsForDecl.size(),
nullptr);
S.InstantiateClassTemplateSpecialization(
noLoc, specializationDecl,
TemplateSpecializationKind::TSK_ImplicitInstantiation, true);
templateDecl->AddSpecialization(specializationDecl, insertPos);
specializationDecl->setImplicit(true);
QualType canonType = astContext.getTypeDeclType(specializationDecl);
TemplateArgumentListInfo templateArgumentList(noLoc, noLoc);
TemplateArgumentLocInfo noTemplateArgumentLocInfo;
for (unsigned i = 0; i < args.size(); i++) {
templateArgumentList.addArgument(
TemplateArgumentLoc(args[i], noTemplateArgumentLocInfo));
}
return astContext.getTemplateSpecializationType(
TemplateName(templateDecl), templateArgumentList, canonType);
}
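// Usage sketch (illustrative; 'matrixTemplateDecl' is assumed to be the HLSL
// 'matrix' class template and 'S' the active Sema instance):
//   QualType float4x4Ty = getHLSLMatrixType(
//       astContext, S, matrixTemplateDecl, astContext.FloatTy,
//       /*rows*/ 4, /*columns*/ 4);
// The helper first looks up an existing matrix<float, 4, 4> specialization
// and only creates and implicitly instantiates one when none exists yet.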
bool isResourceOnlyStructure(QualType type) {
// Remove arrayness if needed.
while (type->isArrayType())
type = type->getAsArrayTypeUnsafe()->getElementType();
if (const auto *structType = type->getAs<RecordType>()) {
for (const auto *field : structType->getDecl()->fields()) {
const auto fieldType = field->getType();
// isResourceType does remove arrayness for the field if needed.
if (!isResourceType(fieldType) && !isResourceOnlyStructure(fieldType)) {
return false;
}
}
return true;
}
return false;
}
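// Illustrative examples (added comments):
//   struct R  { Texture2D t; SamplerState s; };  // resource-only -> true
//   struct RX { Texture2D t; float scale; };     // mixed fields  -> false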
bool isStructureContainingResources(QualType type) {
// Remove arrayness if needed.
while (type->isArrayType())
type = type->getAsArrayTypeUnsafe()->getElementType();
if (const auto *structType = type->getAs<RecordType>()) {
for (const auto *field : structType->getDecl()->fields()) {
const auto fieldType = field->getType();
// isStructureContainingResources and isResourceType functions both remove
// arrayness for the field if needed.
if (isStructureContainingResources(fieldType) ||
isResourceType(fieldType)) {
return true;
}
}
}
return false;
}
bool isStructureContainingNonResources(QualType type) {
// Remove arrayness if needed.
while (type->isArrayType())
type = type->getAsArrayTypeUnsafe()->getElementType();
if (const auto *structType = type->getAs<RecordType>()) {
for (const auto *field : structType->getDecl()->fields()) {
const auto fieldType = field->getType();
// isStructureContainingNonResources and isResourceType functions both
// remove arrayness for the field if needed.
if (isStructureContainingNonResources(fieldType) ||
!isResourceType(fieldType)) {
return true;
}
}
}
return false;
}
bool isStructureContainingMixOfResourcesAndNonResources(QualType type) {
return isStructureContainingResources(type) &&
isStructureContainingNonResources(type);
}
bool isStructureContainingAnyKindOfBuffer(QualType type) {
// Remove arrayness if needed.
while (type->isArrayType())
type = type->getAsArrayTypeUnsafe()->getElementType();
if (const auto *structType = type->getAs<RecordType>()) {
for (const auto *field : structType->getDecl()->fields()) {
auto fieldType = field->getType();
// Remove arrayness if needed.
while (fieldType->isArrayType())
fieldType = fieldType->getAsArrayTypeUnsafe()->getElementType();
if (isAKindOfStructuredOrByteBuffer(fieldType) ||
isConstantTextureBuffer(fieldType) ||
isStructureContainingAnyKindOfBuffer(fieldType)) {
return true;
}
}
}
return false;
}
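// Illustrative example (added comment): a structure such as
//   struct S { StructuredBuffer<float> data; };
// returns true here, whereas a structure holding only textures and samplers
// does not.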
bool isScalarOrNonStructAggregateOfNumericalTypes(QualType type) {
// Remove arrayness if present.
while (type->isArrayType())
type = type->getAsArrayTypeUnsafe()->getElementType();
QualType elemType = {};
if (isScalarType(type, &elemType) || isVectorType(type, &elemType) ||
isMxNMatrix(type, &elemType)) {
    // Return true if the basic element type is a float or non-boolean integer
    // type.
return elemType->isFloatingType() ||
(elemType->isIntegerType() && !elemType->isBooleanType());
}
return false;
}
void forEachSpirvField(
const RecordType *recordType, const StructType *spirvType,
std::function<bool(size_t, const QualType &, const StructType::FieldInfo &)>
operation,
bool includeMerged) {
const auto *cxxDecl = recordType->getAsCXXRecordDecl();
const auto *recordDecl = recordType->getDecl();
  // Iterate through the base classes (one field per base class).
  // Bases cannot be merged into one field the way bitfields can, so simple
  // iteration suffices.
uint32_t lastConvertedIndex = 0;
size_t astFieldIndex = 0;
for (const auto &base : cxxDecl->bases()) {
const auto &type = base.getType();
const auto &spirvField = spirvType->getFields()[astFieldIndex];
if (!operation(spirvField.fieldIndex, type, spirvField)) {
return;
}
lastConvertedIndex = spirvField.fieldIndex;
++astFieldIndex;
}
  // Iterate through the derived class fields. Fields could be merged (e.g.,
  // consecutive bitfields may share a single SPIR-V field).
for (const auto *field : recordDecl->fields()) {
const auto &spirvField = spirvType->getFields()[astFieldIndex];
const uint32_t currentFieldIndex = spirvField.fieldIndex;
if (!includeMerged && astFieldIndex > 0 &&
currentFieldIndex == lastConvertedIndex) {
++astFieldIndex;
continue;
}
const auto &type = field->getType();
if (!operation(currentFieldIndex, type, spirvField)) {
return;
}
lastConvertedIndex = currentFieldIndex;
++astFieldIndex;
}
}
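// Usage sketch (illustrative): visit every SPIR-V field of a record,
// stopping early by returning false from the callback.
//   forEachSpirvField(recordType, spirvType,
//                     [](size_t spirvFieldIndex, const QualType &fieldType,
//                        const StructType::FieldInfo &field) {
//                       // ... inspect fieldType / field ...
//                       return true; // keep iterating
//                     },
//                     /*includeMerged=*/false);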
} // namespace spirv
} // namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/DeclResultIdMapper.cpp | //===--- DeclResultIdMapper.cpp - DeclResultIdMapper impl --------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "DeclResultIdMapper.h"
#include <algorithm>
#include <optional>
#include <sstream>
#include "dxc/DXIL/DxilConstants.h"
#include "dxc/DXIL/DxilTypeSystem.h"
#include "clang/AST/Expr.h"
#include "clang/AST/HlslTypes.h"
#include "clang/SPIRV/AstTypeProbe.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Casting.h"
#include "AlignmentSizeCalculator.h"
#include "SignaturePackingUtil.h"
#include "SpirvEmitter.h"
namespace clang {
namespace spirv {
namespace {
// Returns true if the image format is compatible with the sampled type,
// following the format/type matching rules at
// https://docs.vulkan.org/spec/latest/appendices/spirvenv.html#spirvenv-format-type-matching.
bool areFormatAndTypeCompatible(spv::ImageFormat format, QualType sampledType) {
if (format == spv::ImageFormat::Unknown) {
return true;
}
if (hlsl::IsHLSLVecType(sampledType)) {
// For vectors, we need to check if the element type is compatible. We do
// not check the number of elements because it is possible that the number
// of elements in the sampled type is different. I could not find in the
// spec what should happen in that case.
sampledType = hlsl::GetHLSLVecElementType(sampledType);
}
const Type *desugaredType = sampledType->getUnqualifiedDesugaredType();
const BuiltinType *builtinType = dyn_cast<BuiltinType>(desugaredType);
if (!builtinType) {
return false;
}
switch (format) {
case spv::ImageFormat::Rgba32f:
case spv::ImageFormat::Rg32f:
case spv::ImageFormat::R32f:
case spv::ImageFormat::Rgba16f:
case spv::ImageFormat::Rg16f:
case spv::ImageFormat::R16f:
case spv::ImageFormat::Rgba16:
case spv::ImageFormat::Rg16:
case spv::ImageFormat::R16:
case spv::ImageFormat::Rgba16Snorm:
case spv::ImageFormat::Rg16Snorm:
case spv::ImageFormat::R16Snorm:
case spv::ImageFormat::Rgb10A2:
case spv::ImageFormat::R11fG11fB10f:
case spv::ImageFormat::Rgba8:
case spv::ImageFormat::Rg8:
case spv::ImageFormat::R8:
case spv::ImageFormat::Rgba8Snorm:
case spv::ImageFormat::Rg8Snorm:
case spv::ImageFormat::R8Snorm:
// 32-bit float
return builtinType->getKind() == BuiltinType::Float;
case spv::ImageFormat::Rgba32i:
case spv::ImageFormat::Rg32i:
case spv::ImageFormat::R32i:
case spv::ImageFormat::Rgba16i:
case spv::ImageFormat::Rg16i:
case spv::ImageFormat::R16i:
case spv::ImageFormat::Rgba8i:
case spv::ImageFormat::Rg8i:
case spv::ImageFormat::R8i:
// signed 32-bit int
return builtinType->getKind() == BuiltinType::Int;
case spv::ImageFormat::Rgba32ui:
case spv::ImageFormat::Rg32ui:
case spv::ImageFormat::R32ui:
case spv::ImageFormat::Rgba16ui:
case spv::ImageFormat::Rg16ui:
case spv::ImageFormat::R16ui:
case spv::ImageFormat::Rgb10a2ui:
case spv::ImageFormat::Rgba8ui:
case spv::ImageFormat::Rg8ui:
case spv::ImageFormat::R8ui:
// unsigned 32-bit int
return builtinType->getKind() == BuiltinType::UInt;
case spv::ImageFormat::R64i:
// signed 64-bit int
return builtinType->getKind() == BuiltinType::LongLong;
case spv::ImageFormat::R64ui:
// unsigned 64-bit int
return builtinType->getKind() == BuiltinType::ULongLong;
}
return true;
}
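// Illustrative examples (added comments):
//   areFormatAndTypeCompatible(Rgba32f, float)  -> true  (32-bit float)
//   areFormatAndTypeCompatible(Rgba32f, float4) -> true  (the element type of
//                                                         a vector is checked)
//   areFormatAndTypeCompatible(Rgba32f, int)    -> false (float expected)
//   areFormatAndTypeCompatible(Unknown, T)      -> true  for any type T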
uint32_t getVkBindingAttrSet(const VKBindingAttr *attr, uint32_t defaultSet) {
// If the [[vk::binding(x)]] attribute is provided without the descriptor set,
// we should use the default descriptor set.
if (attr->getSet() == INT_MIN) {
return defaultSet;
}
return attr->getSet();
}
/// Returns the :packoffset() annotation on the given decl. Returns nullptr if
/// the decl does not have one.
hlsl::ConstantPacking *getPackOffset(const clang::NamedDecl *decl) {
for (auto *annotation : decl->getUnusualAnnotations())
if (auto *packing = llvm::dyn_cast<hlsl::ConstantPacking>(annotation))
return packing;
return nullptr;
}
/// Returns the number of binding numbers that are used up by the given type.
/// An array of size N consumes N*M binding numbers where M is the number of
/// binding numbers used by each array element.
/// The number of binding numbers used by a structure is the sum of binding
/// numbers used by its members.
uint32_t getNumBindingsUsedByResourceType(QualType type) {
// For custom-generated types that have SpirvType but no QualType.
if (type.isNull())
return 1;
// For every array dimension, the number of bindings needed should be
// multiplied by the array size. For example: an array of two Textures should
// use 2 binding slots.
uint32_t arrayFactor = 1;
while (auto constArrayType = dyn_cast<ConstantArrayType>(type)) {
arrayFactor *=
static_cast<uint32_t>(constArrayType->getSize().getZExtValue());
type = constArrayType->getElementType();
}
// Once we remove the arrayness, we expect the given type to be either a
// resource OR a structure that only contains resources.
assert(isResourceType(type) || isResourceOnlyStructure(type));
// In the case of a resource, each resource takes 1 binding slot, so in total
// it consumes: 1 * arrayFactor.
if (isResourceType(type))
return arrayFactor;
// In the case of a struct of resources, we need to sum up the number of
// bindings for the struct members. So in total it consumes:
// sum(bindings of struct members) * arrayFactor.
if (isResourceOnlyStructure(type)) {
uint32_t sumOfMemberBindings = 0;
const auto *structDecl = type->getAs<RecordType>()->getDecl();
assert(structDecl);
for (const auto *field : structDecl->fields())
sumOfMemberBindings += getNumBindingsUsedByResourceType(field->getType());
return sumOfMemberBindings * arrayFactor;
}
llvm_unreachable(
"getNumBindingsUsedByResourceType was called with unknown resource type");
}
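// Illustrative examples (added comments):
//   Texture2D tex[4];                           -> 4 bindings
//   struct R { Texture2D t; SamplerState s; };
//   R pairs[3];                                 -> (1 + 1) * 3 = 6 bindings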
QualType getUintTypeWithSourceComponents(const ASTContext &astContext,
QualType sourceType) {
if (isScalarType(sourceType)) {
return astContext.UnsignedIntTy;
}
uint32_t elemCount = 0;
if (isVectorType(sourceType, nullptr, &elemCount)) {
return astContext.getExtVectorType(astContext.UnsignedIntTy, elemCount);
}
llvm_unreachable("only scalar and vector types are supported in "
"getUintTypeWithSourceComponents");
}
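// Illustrative examples (added comment): float -> uint, float3 -> uint3.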
LocationAndComponent getLocationAndComponentCount(const ASTContext &astContext,
QualType type) {
// See Vulkan spec 14.1.4. Location Assignment for the complete set of rules.
const auto canonicalType = type.getCanonicalType();
if (canonicalType != type)
return getLocationAndComponentCount(astContext, canonicalType);
// Inputs and outputs of the following types consume a single interface
// location:
// * 16-bit scalar and vector types, and
// * 32-bit scalar and vector types, and
// * 64-bit scalar and 2-component vector types.
// 64-bit three- and four- component vectors consume two consecutive
// locations.
// Primitive types
if (isScalarType(type)) {
const auto *builtinType = type->getAs<BuiltinType>();
if (builtinType != nullptr) {
switch (builtinType->getKind()) {
case BuiltinType::Double:
case BuiltinType::LongLong:
case BuiltinType::ULongLong:
return {1, 2, true};
default:
return {1, 1, false};
}
}
return {1, 1, false};
}
// Vector types
{
QualType elemType = {};
uint32_t elemCount = {};
if (isVectorType(type, &elemType, &elemCount)) {
const auto *builtinType = elemType->getAs<BuiltinType>();
switch (builtinType->getKind()) {
case BuiltinType::Double:
case BuiltinType::LongLong:
case BuiltinType::ULongLong: {
if (elemCount >= 3)
return {2, 4, true};
return {1, 2 * elemCount, true};
}
default:
        // The switch above only special-cases 64-bit types, which occupy two
        // locations; all other element types fall through to the return below.
break;
}
return {1, elemCount, false};
}
}
  // If the declared input or output is an n * m 16-, 32-, or 64-bit matrix,
// it will be assigned multiple locations starting with the location
// specified. The number of locations assigned for each matrix will be the
// same as for an n-element array of m-component vectors.
// Matrix types
{
QualType elemType = {};
uint32_t rowCount = 0, colCount = 0;
if (isMxNMatrix(type, &elemType, &rowCount, &colCount)) {
auto locComponentCount = getLocationAndComponentCount(
astContext, astContext.getExtVectorType(elemType, colCount));
return {locComponentCount.location * rowCount,
locComponentCount.component,
locComponentCount.componentAlignment};
}
}
// Typedefs
if (const auto *typedefType = type->getAs<TypedefType>())
return getLocationAndComponentCount(astContext, typedefType->desugar());
// Reference types
if (const auto *refType = type->getAs<ReferenceType>())
return getLocationAndComponentCount(astContext, refType->getPointeeType());
// Pointer types
if (const auto *ptrType = type->getAs<PointerType>())
return getLocationAndComponentCount(astContext, ptrType->getPointeeType());
// If a declared input or output is an array of size n and each element takes
// m locations, it will be assigned m * n consecutive locations starting with
// the location specified.
// Array types
if (const auto *arrayType = astContext.getAsConstantArrayType(type)) {
auto locComponentCount =
getLocationAndComponentCount(astContext, arrayType->getElementType());
uint32_t arrayLength =
static_cast<uint32_t>(arrayType->getSize().getZExtValue());
return {locComponentCount.location * arrayLength,
locComponentCount.component, locComponentCount.componentAlignment};
}
// Struct type
if (type->getAs<RecordType>()) {
assert(false && "all structs should already be flattened");
return {0, 0, false};
}
llvm_unreachable(
"calculating number of occupied locations for type unimplemented");
return {0, 0, false};
}
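// Illustrative examples (added comments):
//   float4   -> 1 location, 4 components
//   double3  -> 2 locations (64-bit three-component vectors take two)
//   float3x2 -> 3 locations (treated like a 3-element array of float2)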
bool shouldSkipInStructLayout(const Decl *decl) {
// Ignore implicit generated struct declarations/constructors/destructors
if (decl->isImplicit())
return true;
// Ignore embedded type decls
if (isa<TypeDecl>(decl))
return true;
  // Ignore embedded function decls
if (isa<FunctionDecl>(decl))
return true;
// Ignore empty decls
if (isa<EmptyDecl>(decl))
return true;
// For the $Globals cbuffer, we only care about externally-visible
// non-resource-type variables. The rest should be filtered out.
const auto *declContext = decl->getDeclContext();
// $Globals' "struct" is the TranslationUnit, so we should ignore resources
// in the TranslationUnit "struct" and its child namespaces.
if (declContext->isTranslationUnit() || declContext->isNamespace()) {
if (decl->hasAttr<VKConstantIdAttr>()) {
return true;
}
if (decl->hasAttr<VKPushConstantAttr>()) {
return true;
}
// External visibility
if (const auto *declDecl = dyn_cast<DeclaratorDecl>(decl))
if (!declDecl->hasExternalFormalLinkage())
return true;
// cbuffer/tbuffer
if (isa<HLSLBufferDecl>(decl))
return true;
// 'groupshared' variables should not be placed in $Globals cbuffer.
if (decl->hasAttr<HLSLGroupSharedAttr>())
return true;
// Other resource types
if (const auto *valueDecl = dyn_cast<ValueDecl>(decl)) {
const auto declType = valueDecl->getType();
if (isResourceType(declType) || isResourceOnlyStructure(declType))
return true;
}
}
return false;
}
void collectDeclsInField(const Decl *field,
llvm::SmallVector<const Decl *, 4> *decls) {
// Case of nested namespaces.
if (const auto *nsDecl = dyn_cast<NamespaceDecl>(field)) {
for (const auto *decl : nsDecl->decls()) {
collectDeclsInField(decl, decls);
}
}
if (shouldSkipInStructLayout(field))
return;
if (!isa<DeclaratorDecl>(field)) {
return;
}
decls->push_back(field);
}
llvm::SmallVector<const Decl *, 4>
collectDeclsInDeclContext(const DeclContext *declContext) {
llvm::SmallVector<const Decl *, 4> decls;
for (const auto *field : declContext->decls()) {
collectDeclsInField(field, &decls);
}
return decls;
}
/// \brief Returns true if the given decl is a boolean stage I/O variable.
/// Returns false if the type is not boolean, or the decl is a built-in stage
/// variable.
bool isBooleanStageIOVar(const NamedDecl *decl, QualType type,
const hlsl::DXIL::SemanticKind semanticKind,
const hlsl::SigPoint::Kind sigPointKind) {
// [[vk::builtin(...)]] makes the decl a built-in stage variable.
// IsFrontFace (if used as PSIn) is the only known boolean built-in stage
// variable.
const bool isBooleanBuiltin =
(decl->getAttr<VKBuiltInAttr>() != nullptr) ||
(semanticKind == hlsl::Semantic::Kind::IsFrontFace &&
sigPointKind == hlsl::SigPoint::Kind::PSIn);
// TODO: support boolean matrix stage I/O variable if needed.
QualType elemType = {};
const bool isBooleanType =
((isScalarType(type, &elemType) || isVectorType(type, &elemType)) &&
elemType->isBooleanType());
return isBooleanType && !isBooleanBuiltin;
}
/// \brief Returns the stage variable's register assignment for the given Decl.
const hlsl::RegisterAssignment *getResourceBinding(const NamedDecl *decl) {
for (auto *annotation : decl->getUnusualAnnotations()) {
if (auto *reg = dyn_cast<hlsl::RegisterAssignment>(annotation)) {
return reg;
}
}
return nullptr;
}
/// \brief Returns the stage variable's 'register(c#)' assignment for the given
/// Decl. Returns nullptr if the given variable does not have such an
/// assignment.
const hlsl::RegisterAssignment *getRegisterCAssignment(const NamedDecl *decl) {
const auto *regAssignment = getResourceBinding(decl);
if (regAssignment)
return regAssignment->RegisterType == 'c' ? regAssignment : nullptr;
return nullptr;
}
/// \brief Returns true if the given declaration has a primitive type qualifier.
/// Returns false otherwise.
inline bool hasGSPrimitiveTypeQualifier(const Decl *decl) {
return decl->hasAttr<HLSLTriangleAttr>() ||
decl->hasAttr<HLSLTriangleAdjAttr>() ||
decl->hasAttr<HLSLPointAttr>() || decl->hasAttr<HLSLLineAttr>() ||
decl->hasAttr<HLSLLineAdjAttr>();
}
/// \brief Deduces the parameter qualifier for the given decl.
hlsl::DxilParamInputQual deduceParamQual(const DeclaratorDecl *decl,
bool asInput) {
const auto type = decl->getType();
if (hlsl::IsHLSLInputPatchType(type))
return hlsl::DxilParamInputQual::InputPatch;
if (hlsl::IsHLSLOutputPatchType(type))
return hlsl::DxilParamInputQual::OutputPatch;
// TODO: Add support for multiple output streams.
if (hlsl::IsHLSLStreamOutputType(type))
return hlsl::DxilParamInputQual::OutStream0;
// The inputs to the geometry shader that have a primitive type qualifier
// must use 'InputPrimitive'.
if (hasGSPrimitiveTypeQualifier(decl))
return hlsl::DxilParamInputQual::InputPrimitive;
if (decl->hasAttr<HLSLIndicesAttr>())
return hlsl::DxilParamInputQual::OutIndices;
if (decl->hasAttr<HLSLVerticesAttr>())
return hlsl::DxilParamInputQual::OutVertices;
if (decl->hasAttr<HLSLPrimitivesAttr>())
return hlsl::DxilParamInputQual::OutPrimitives;
if (decl->hasAttr<HLSLPayloadAttr>())
return hlsl::DxilParamInputQual::InPayload;
return asInput ? hlsl::DxilParamInputQual::In : hlsl::DxilParamInputQual::Out;
}
/// \brief Deduces the HLSL SigPoint for the given decl appearing in the given
/// shader model.
const hlsl::SigPoint *deduceSigPoint(const DeclaratorDecl *decl, bool asInput,
const hlsl::ShaderModel::Kind kind,
bool forPCF) {
return hlsl::SigPoint::GetSigPoint(hlsl::SigPointFromInputQual(
deduceParamQual(decl, asInput), kind, forPCF));
}
/// Returns the type of the given decl. If the given decl is a FunctionDecl,
/// returns its result type.
inline QualType getTypeOrFnRetType(const DeclaratorDecl *decl) {
if (const auto *funcDecl = dyn_cast<FunctionDecl>(decl)) {
return funcDecl->getReturnType();
}
return decl->getType();
}
/// Returns the number of base classes if this type is a derived class/struct.
/// Returns zero otherwise.
inline uint32_t getNumBaseClasses(QualType type) {
if (const auto *cxxDecl = type->getAsCXXRecordDecl())
return cxxDecl->getNumBases();
return 0;
}
/// Returns the appropriate storage class for an extern variable of the given
/// type.
spv::StorageClass getStorageClassForExternVar(QualType type,
bool hasGroupsharedAttr) {
// For CS groupshared variables
if (hasGroupsharedAttr)
return spv::StorageClass::Workgroup;
if (isAKindOfStructuredOrByteBuffer(type) || isConstantTextureBuffer(type))
return spv::StorageClass::Uniform;
return spv::StorageClass::UniformConstant;
}
/// Returns the appropriate layout rule for an extern variable of the given
/// type.
SpirvLayoutRule getLayoutRuleForExternVar(QualType type,
const SpirvCodeGenOptions &opts) {
if (isAKindOfStructuredOrByteBuffer(type))
return opts.sBufferLayoutRule;
if (isConstantBuffer(type))
return opts.cBufferLayoutRule;
if (isTextureBuffer(type))
return opts.tBufferLayoutRule;
return SpirvLayoutRule::Void;
}
spv::ImageFormat getSpvImageFormat(const VKImageFormatAttr *imageFormatAttr) {
if (imageFormatAttr == nullptr)
return spv::ImageFormat::Unknown;
switch (imageFormatAttr->getImageFormat()) {
case VKImageFormatAttr::unknown:
return spv::ImageFormat::Unknown;
case VKImageFormatAttr::rgba32f:
return spv::ImageFormat::Rgba32f;
case VKImageFormatAttr::rgba16f:
return spv::ImageFormat::Rgba16f;
case VKImageFormatAttr::r32f:
return spv::ImageFormat::R32f;
case VKImageFormatAttr::rgba8:
return spv::ImageFormat::Rgba8;
case VKImageFormatAttr::rgba8snorm:
return spv::ImageFormat::Rgba8Snorm;
case VKImageFormatAttr::rg32f:
return spv::ImageFormat::Rg32f;
case VKImageFormatAttr::rg16f:
return spv::ImageFormat::Rg16f;
case VKImageFormatAttr::r11g11b10f:
return spv::ImageFormat::R11fG11fB10f;
case VKImageFormatAttr::r16f:
return spv::ImageFormat::R16f;
case VKImageFormatAttr::rgba16:
return spv::ImageFormat::Rgba16;
case VKImageFormatAttr::rgb10a2:
return spv::ImageFormat::Rgb10A2;
case VKImageFormatAttr::rg16:
return spv::ImageFormat::Rg16;
case VKImageFormatAttr::rg8:
return spv::ImageFormat::Rg8;
case VKImageFormatAttr::r16:
return spv::ImageFormat::R16;
case VKImageFormatAttr::r8:
return spv::ImageFormat::R8;
case VKImageFormatAttr::rgba16snorm:
return spv::ImageFormat::Rgba16Snorm;
case VKImageFormatAttr::rg16snorm:
return spv::ImageFormat::Rg16Snorm;
case VKImageFormatAttr::rg8snorm:
return spv::ImageFormat::Rg8Snorm;
case VKImageFormatAttr::r16snorm:
return spv::ImageFormat::R16Snorm;
case VKImageFormatAttr::r8snorm:
return spv::ImageFormat::R8Snorm;
case VKImageFormatAttr::rgba32i:
return spv::ImageFormat::Rgba32i;
case VKImageFormatAttr::rgba16i:
return spv::ImageFormat::Rgba16i;
case VKImageFormatAttr::rgba8i:
return spv::ImageFormat::Rgba8i;
case VKImageFormatAttr::r32i:
return spv::ImageFormat::R32i;
case VKImageFormatAttr::rg32i:
return spv::ImageFormat::Rg32i;
case VKImageFormatAttr::rg16i:
return spv::ImageFormat::Rg16i;
case VKImageFormatAttr::rg8i:
return spv::ImageFormat::Rg8i;
case VKImageFormatAttr::r16i:
return spv::ImageFormat::R16i;
case VKImageFormatAttr::r8i:
return spv::ImageFormat::R8i;
case VKImageFormatAttr::rgba32ui:
return spv::ImageFormat::Rgba32ui;
case VKImageFormatAttr::rgba16ui:
return spv::ImageFormat::Rgba16ui;
case VKImageFormatAttr::rgba8ui:
return spv::ImageFormat::Rgba8ui;
case VKImageFormatAttr::r32ui:
return spv::ImageFormat::R32ui;
case VKImageFormatAttr::rgb10a2ui:
return spv::ImageFormat::Rgb10a2ui;
case VKImageFormatAttr::rg32ui:
return spv::ImageFormat::Rg32ui;
case VKImageFormatAttr::rg16ui:
return spv::ImageFormat::Rg16ui;
case VKImageFormatAttr::rg8ui:
return spv::ImageFormat::Rg8ui;
case VKImageFormatAttr::r16ui:
return spv::ImageFormat::R16ui;
case VKImageFormatAttr::r8ui:
return spv::ImageFormat::R8ui;
case VKImageFormatAttr::r64ui:
return spv::ImageFormat::R64ui;
case VKImageFormatAttr::r64i:
return spv::ImageFormat::R64i;
}
return spv::ImageFormat::Unknown;
}
// Records the given semantic string as seen for entryPoint in
// seenSemanticsForEntryPoints. Returns true if the semantic had not already
// been seen for that entry point.
bool insertSeenSemanticsForEntryPointIfNotExist(
llvm::SmallDenseMap<SpirvFunction *, llvm::StringSet<>>
*seenSemanticsForEntryPoints,
SpirvFunction *entryPoint, const std::string &semantics) {
auto seenSemanticsForEntryPointsItr =
seenSemanticsForEntryPoints->find(entryPoint);
if (seenSemanticsForEntryPointsItr == seenSemanticsForEntryPoints->end()) {
bool insertResult = false;
std::tie(seenSemanticsForEntryPointsItr, insertResult) =
seenSemanticsForEntryPoints->insert(
std::make_pair(entryPoint, llvm::StringSet<>()));
assert(insertResult);
seenSemanticsForEntryPointsItr->second.insert(semantics);
return true;
}
auto &seenSemantics = seenSemanticsForEntryPointsItr->second;
if (seenSemantics.count(semantics)) {
return false;
}
seenSemantics.insert(semantics);
return true;
}
// Returns whether the type is translated to a 32-bit floating point type,
// depending on whether SPIR-V codegen options are configured to use 16-bit
// types when possible.
bool is32BitFloatingPointType(BuiltinType::Kind kind, bool use16Bit) {
// Always translated into 32-bit floating point types.
if (kind == BuiltinType::Float || kind == BuiltinType::LitFloat)
return true;
// Translated into 32-bit floating point types when run without
// -enable-16bit-types.
if (kind == BuiltinType::Half || kind == BuiltinType::HalfFloat ||
kind == BuiltinType::Min10Float || kind == BuiltinType::Min16Float)
return !use16Bit;
return false;
}
// Returns whether the type is a 4-component vector of 32-bit floats, or a
// composite type that recursively contains only such a vector, e.g., float4,
// float4[1], struct S { float4 foo[1]; }.
bool containOnlyVecWithFourFloats(QualType type, bool use16Bit) {
if (type->isReferenceType())
type = type->getPointeeType();
if (is1xNMatrix(type, nullptr, nullptr))
return false;
uint32_t elemCount = 0;
if (type->isConstantArrayType()) {
    const auto *arrayType =
        cast<ConstantArrayType>(type->getAsArrayTypeUnsafe());
elemCount = hlsl::GetArraySize(type);
return elemCount == 1 &&
containOnlyVecWithFourFloats(arrayType->getElementType(), use16Bit);
}
if (const auto *structType = type->getAs<RecordType>()) {
uint32_t fieldCount = 0;
for (const auto *field : structType->getDecl()->fields()) {
if (fieldCount != 0)
return false;
if (!containOnlyVecWithFourFloats(field->getType(), use16Bit))
return false;
++fieldCount;
}
return fieldCount == 1;
}
QualType elemType = {};
if (isVectorType(type, &elemType, &elemCount)) {
if (const auto *builtinType = elemType->getAs<BuiltinType>()) {
return elemCount == 4 &&
is32BitFloatingPointType(builtinType->getKind(), use16Bit);
}
return false;
}
return false;
}
} // anonymous namespace
std::string StageVar::getSemanticStr() const {
// A special case for zero index, which is equivalent to no index.
// Use what is in the source code.
// TODO: this looks like a hack to make the current tests happy.
  // Should consider removing it and fixing all tests.
if (semanticInfo.index == 0)
return semanticInfo.str;
std::ostringstream ss;
ss << semanticInfo.name.str() << semanticInfo.index;
return ss.str();
}
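// Illustrative example (added comment): for semantic name "TEXCOORD" with
// index 2 this returns "TEXCOORD2"; for index 0 it returns the semantic
// string exactly as written in the source (e.g. "TEXCOORD" or "TEXCOORD0").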
SpirvInstruction *CounterIdAliasPair::getAliasAddress() const {
assert(isAlias);
return counterVar;
}
SpirvInstruction *
CounterIdAliasPair::getCounterVariable(SpirvBuilder &builder,
SpirvContext &spvContext) const {
if (isAlias) {
const auto *counterType = spvContext.getACSBufferCounterType();
const auto *counterVarType =
spvContext.getPointerType(counterType, spv::StorageClass::Uniform);
return builder.createLoad(counterVarType, counterVar,
/* SourceLocation */ {});
}
return counterVar;
}
const CounterIdAliasPair *
CounterVarFields::get(const llvm::SmallVectorImpl<uint32_t> &indices) const {
for (const auto &field : fields)
if (field.indices == indices)
return &field.counterVar;
return nullptr;
}
bool CounterVarFields::assign(const CounterVarFields &srcFields,
SpirvBuilder &builder,
SpirvContext &context) const {
for (const auto &field : fields) {
const auto *srcField = srcFields.get(field.indices);
if (!srcField)
return false;
field.counterVar.assign(srcField->getCounterVariable(builder, context),
builder);
}
return true;
}
bool CounterVarFields::assign(const CounterVarFields &srcFields,
const llvm::SmallVector<uint32_t, 4> &dstPrefix,
const llvm::SmallVector<uint32_t, 4> &srcPrefix,
SpirvBuilder &builder,
SpirvContext &context) const {
if (dstPrefix.empty() && srcPrefix.empty())
return assign(srcFields, builder, context);
llvm::SmallVector<uint32_t, 4> srcIndices = srcPrefix;
// If whole has the given prefix, appends all elements after the prefix in
// whole to srcIndices.
const auto applyDiff =
[&srcIndices](const llvm::SmallVector<uint32_t, 4> &whole,
const llvm::SmallVector<uint32_t, 4> &prefix) -> bool {
uint32_t i = 0;
for (; i < prefix.size(); ++i)
if (whole[i] != prefix[i]) {
break;
}
if (i == prefix.size()) {
for (; i < whole.size(); ++i)
srcIndices.push_back(whole[i]);
return true;
}
return false;
};
for (const auto &field : fields)
if (applyDiff(field.indices, dstPrefix)) {
const auto *srcField = srcFields.get(srcIndices);
if (!srcField)
return false;
field.counterVar.assign(srcField->getCounterVariable(builder, context),
builder);
for (uint32_t i = srcPrefix.size(); i < srcIndices.size(); ++i)
srcIndices.pop_back();
}
return true;
}
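// Illustrative example (added comment): with dstPrefix = {1} and
// srcPrefix = {2}, a destination field with indices {1, 0} is assigned from
// the source field with indices {2, 0}: the destination prefix is stripped
// and the source prefix is prepended before the lookup.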
SemanticInfo DeclResultIdMapper::getStageVarSemantic(const NamedDecl *decl) {
for (auto *annotation : decl->getUnusualAnnotations()) {
if (auto *sema = dyn_cast<hlsl::SemanticDecl>(annotation)) {
llvm::StringRef semanticStr = sema->SemanticName;
llvm::StringRef semanticName;
uint32_t index = 0;
hlsl::Semantic::DecomposeNameAndIndex(semanticStr, &semanticName, &index);
const auto *semantic = hlsl::Semantic::GetByName(semanticName);
return {semanticStr, semantic, semanticName, index, sema->Loc};
}
}
return {};
}
bool DeclResultIdMapper::createStageOutputVar(const DeclaratorDecl *decl,
SpirvInstruction *storedValue,
bool forPCF) {
QualType type = getTypeOrFnRetType(decl);
uint32_t arraySize = 0;
// Output stream types (PointStream, LineStream, TriangleStream) are
// translated as their underlying struct types.
if (hlsl::IsHLSLStreamOutputType(type))
type = hlsl::GetHLSLResourceResultType(type);
if (decl->hasAttr<HLSLIndicesAttr>() || decl->hasAttr<HLSLVerticesAttr>() ||
decl->hasAttr<HLSLPrimitivesAttr>()) {
const auto *typeDecl = astContext.getAsConstantArrayType(type);
type = typeDecl->getElementType();
arraySize = static_cast<uint32_t>(typeDecl->getSize().getZExtValue());
if (decl->hasAttr<HLSLIndicesAttr>()) {
// create SPIR-V builtin array PrimitiveIndicesNV of type
// "uint [MaxPrimitiveCount * verticesPerPrim]"
uint32_t verticesPerPrim = 1;
if (!isVectorType(type, nullptr, &verticesPerPrim)) {
assert(isScalarType(type));
}
spv::BuiltIn builtinID = spv::BuiltIn::Max;
if (featureManager.isExtensionEnabled(Extension::EXT_mesh_shader)) {
// For EXT_mesh_shader, set builtin type as
// PrimitivePoint/Line/TriangleIndicesEXT based on the vertices per
// primitive
switch (verticesPerPrim) {
case 1:
builtinID = spv::BuiltIn::PrimitivePointIndicesEXT;
break;
case 2:
builtinID = spv::BuiltIn::PrimitiveLineIndicesEXT;
break;
case 3:
builtinID = spv::BuiltIn::PrimitiveTriangleIndicesEXT;
break;
default:
break;
}
QualType arrayType = astContext.getConstantArrayType(
type, llvm::APInt(32, arraySize), clang::ArrayType::Normal, 0);
stageVarInstructions[cast<DeclaratorDecl>(decl)] =
getBuiltinVar(builtinID, arrayType, decl->getLocation());
} else {
      // For NV_mesh_shader, the builtin type is PrimitiveIndicesNV
builtinID = spv::BuiltIn::PrimitiveIndicesNV;
arraySize = arraySize * verticesPerPrim;
QualType arrayType = astContext.getConstantArrayType(
astContext.UnsignedIntTy, llvm::APInt(32, arraySize),
clang::ArrayType::Normal, 0);
stageVarInstructions[cast<DeclaratorDecl>(decl)] =
getBuiltinVar(builtinID, arrayType, decl->getLocation());
}
return true;
}
}
const auto *sigPoint = deduceSigPoint(
decl, /*asInput=*/false, spvContext.getCurrentShaderModelKind(), forPCF);
// HS output variables are created using the other overload. For the rest,
// none of them should be created as arrays.
assert(sigPoint->GetKind() != hlsl::DXIL::SigPointKind::HSCPOut);
SemanticInfo inheritSemantic = {};
  // If storedValue is nullptr, this parameter in the original source code is
  // not used at all. Avoid writing back.
//
// Write back of stage output variables in GS is manually controlled by
// .Append() intrinsic method, implemented in writeBackOutputStream(). So
// ignoreValue should be set to true for GS.
const bool noWriteBack =
storedValue == nullptr || spvContext.isGS() || spvContext.isMS();
StageVarDataBundle stageVarData = {
decl, &inheritSemantic, false, sigPoint,
type, arraySize, "out.var", llvm::None};
return createStageVars(stageVarData, /*asInput=*/false, &storedValue,
noWriteBack);
}
bool DeclResultIdMapper::createStageOutputVar(const DeclaratorDecl *decl,
uint32_t arraySize,
SpirvInstruction *invocationId,
SpirvInstruction *storedValue) {
assert(spvContext.isHS());
QualType type = getTypeOrFnRetType(decl);
const auto *sigPoint =
hlsl::SigPoint::GetSigPoint(hlsl::DXIL::SigPointKind::HSCPOut);
SemanticInfo inheritSemantic = {};
StageVarDataBundle stageVarData = {
decl, &inheritSemantic, false, sigPoint,
type, arraySize, "out.var", invocationId};
return createStageVars(stageVarData, /*asInput=*/false, &storedValue,
/*noWriteBack=*/false);
}
bool DeclResultIdMapper::createStageInputVar(const ParmVarDecl *paramDecl,
SpirvInstruction **loadedValue,
bool forPCF) {
uint32_t arraySize = 0;
QualType type = paramDecl->getType();
  // Strip the outermost arrayness for HS/DS/GS and use arraySize to convey
  // that information.
if (hlsl::IsHLSLInputPatchType(type)) {
arraySize = hlsl::GetHLSLInputPatchCount(type);
type = hlsl::GetHLSLInputPatchElementType(type);
} else if (hlsl::IsHLSLOutputPatchType(type)) {
arraySize = hlsl::GetHLSLOutputPatchCount(type);
type = hlsl::GetHLSLOutputPatchElementType(type);
}
if (hasGSPrimitiveTypeQualifier(paramDecl)) {
const auto *typeDecl = astContext.getAsConstantArrayType(type);
arraySize = static_cast<uint32_t>(typeDecl->getSize().getZExtValue());
type = typeDecl->getElementType();
}
const auto *sigPoint =
deduceSigPoint(paramDecl, /*asInput=*/true,
spvContext.getCurrentShaderModelKind(), forPCF);
SemanticInfo inheritSemantic = {};
if (paramDecl->hasAttr<HLSLPayloadAttr>()) {
spv::StorageClass sc =
(featureManager.isExtensionEnabled(Extension::EXT_mesh_shader))
? spv::StorageClass::TaskPayloadWorkgroupEXT
: getStorageClassForSigPoint(sigPoint);
return createPayloadStageVars(sigPoint, sc, paramDecl, /*asInput=*/true,
type, "in.var", loadedValue);
} else {
StageVarDataBundle stageVarData = {
paramDecl,
&inheritSemantic,
paramDecl->hasAttr<HLSLNoInterpolationAttr>(),
sigPoint,
type,
arraySize,
"in.var",
llvm::None};
return createStageVars(stageVarData, /*asInput=*/true, loadedValue,
/*noWriteBack=*/false);
}
}
const DeclResultIdMapper::DeclSpirvInfo *
DeclResultIdMapper::getDeclSpirvInfo(const ValueDecl *decl) const {
auto it = astDecls.find(decl);
if (it != astDecls.end())
return &it->second;
return nullptr;
}
SpirvInstruction *DeclResultIdMapper::getDeclEvalInfo(const ValueDecl *decl,
SourceLocation loc,
SourceRange range) {
if (auto *builtinAttr = decl->getAttr<VKExtBuiltinInputAttr>()) {
return getBuiltinVar(spv::BuiltIn(builtinAttr->getBuiltInID()),
decl->getType(), spv::StorageClass::Input, loc);
} else if (auto *builtinAttr = decl->getAttr<VKExtBuiltinOutputAttr>()) {
return getBuiltinVar(spv::BuiltIn(builtinAttr->getBuiltInID()),
decl->getType(), spv::StorageClass::Output, loc);
}
const DeclSpirvInfo *info = getDeclSpirvInfo(decl);
// If DeclSpirvInfo is not found for this decl, it might be because it is an
// implicit VarDecl. All implicit VarDecls are lazily created in order to
  // avoid creating a large number of unused variables/constants/enums.
if (!info) {
tryToCreateImplicitConstVar(decl);
info = getDeclSpirvInfo(decl);
}
if (info) {
if (info->indexInCTBuffer >= 0) {
// If this is a VarDecl inside a HLSLBufferDecl, we need to do an extra
// OpAccessChain to get the pointer to the variable since we created
// a single variable for the whole buffer object.
// Should only have VarDecls in a HLSLBufferDecl.
QualType valueType = cast<VarDecl>(decl)->getType();
return spvBuilder.createAccessChain(
valueType, info->instr,
{spvBuilder.getConstantInt(
astContext.IntTy, llvm::APInt(32, info->indexInCTBuffer, true))},
loc, range);
} else if (auto *type = info->instr->getResultType()) {
const auto *ptrTy = dyn_cast<HybridPointerType>(type);
// If it is a local variable or function parameter with a bindless
// array of an opaque type, we have to load it because we pass a
// pointer of a global variable that has the bindless opaque array.
if (ptrTy != nullptr && isBindlessOpaqueArray(decl->getType())) {
auto *load = spvBuilder.createLoad(ptrTy, info->instr, loc, range);
load->setRValue(false);
return load;
} else {
return *info;
}
} else {
return *info;
}
}
emitFatalError("found unregistered decl %0", decl->getLocation())
<< decl->getName();
emitNote("please file a bug report on "
"https://github.com/Microsoft/DirectXShaderCompiler/issues with "
"source code if possible",
{});
  return nullptr;
}
SpirvFunctionParameter *
DeclResultIdMapper::createFnParam(const ParmVarDecl *param,
uint32_t dbgArgNumber) {
const auto type = getTypeOrFnRetType(param);
const auto loc = param->getLocation();
const auto range = param->getSourceRange();
const auto name = param->getName();
SpirvFunctionParameter *fnParamInstr = spvBuilder.addFnParam(
type, param->hasAttr<HLSLPreciseAttr>(),
param->hasAttr<HLSLNoInterpolationAttr>(), loc, param->getName());
bool isAlias = false;
(void)getTypeAndCreateCounterForPotentialAliasVar(param, &isAlias);
fnParamInstr->setContainsAliasComponent(isAlias);
assert(astDecls[param].instr == nullptr);
astDecls[param].instr = fnParamInstr;
if (spirvOptions.debugInfoRich) {
// Add DebugLocalVariable information
const auto &sm = astContext.getSourceManager();
const uint32_t line = sm.getPresumedLineNumber(loc);
const uint32_t column = sm.getPresumedColumnNumber(loc);
const auto *info = theEmitter.getOrCreateRichDebugInfo(loc);
// TODO: replace this with FlagIsLocal enum.
uint32_t flags = 1 << 2;
auto *debugLocalVar = spvBuilder.createDebugLocalVariable(
type, name, info->source, line, column, info->scopeStack.back(), flags,
dbgArgNumber);
spvBuilder.createDebugDeclare(debugLocalVar, fnParamInstr, loc, range);
}
return fnParamInstr;
}
void DeclResultIdMapper::createCounterVarForDecl(const DeclaratorDecl *decl) {
const QualType declType = getTypeOrFnRetType(decl);
if (!counterVars.count(decl) && isRWAppendConsumeSBuffer(declType)) {
createCounterVar(decl, /*declId=*/0, /*isAlias=*/true);
} else if (!fieldCounterVars.count(decl) && declType->isStructureType() &&
// Exclude other resource types which are represented as structs
!hlsl::IsHLSLResourceType(declType)) {
createFieldCounterVars(decl);
}
}
SpirvVariable *
DeclResultIdMapper::createFnVar(const VarDecl *var,
llvm::Optional<SpirvInstruction *> init) {
if (astDecls[var].instr != nullptr)
return cast<SpirvVariable>(astDecls[var].instr);
const auto type = getTypeOrFnRetType(var);
const auto loc = var->getLocation();
const auto name = var->getName();
const bool isPrecise = var->hasAttr<HLSLPreciseAttr>();
const bool isNointerp = var->hasAttr<HLSLNoInterpolationAttr>();
SpirvVariable *varInstr =
spvBuilder.addFnVar(type, loc, name, isPrecise, isNointerp,
init.hasValue() ? init.getValue() : nullptr);
bool isAlias = false;
(void)getTypeAndCreateCounterForPotentialAliasVar(var, &isAlias);
varInstr->setContainsAliasComponent(isAlias);
astDecls[var].instr = varInstr;
return varInstr;
}
SpirvDebugGlobalVariable *DeclResultIdMapper::createDebugGlobalVariable(
SpirvVariable *var, const QualType &type, const SourceLocation &loc,
const StringRef &name) {
if (spirvOptions.debugInfoRich) {
// Add DebugGlobalVariable information
const auto &sm = astContext.getSourceManager();
const uint32_t line = sm.getPresumedLineNumber(loc);
const uint32_t column = sm.getPresumedColumnNumber(loc);
const auto *info = theEmitter.getOrCreateRichDebugInfo(loc);
// TODO: replace this with FlagIsDefinition enum.
uint32_t flags = 1 << 3;
// TODO: update linkageName correctly.
auto *dbgGlobalVar = spvBuilder.createDebugGlobalVariable(
type, name, info->source, line, column, info->scopeStack.back(),
/* linkageName */ name, var, flags);
dbgGlobalVar->setDebugSpirvType(var->getResultType());
dbgGlobalVar->setLayoutRule(var->getLayoutRule());
return dbgGlobalVar;
}
return nullptr;
}
SpirvVariable *
DeclResultIdMapper::createFileVar(const VarDecl *var,
llvm::Optional<SpirvInstruction *> init) {
// In the case of template specialization, the same VarDecl node in the AST
// may be traversed more than once.
if (astDecls[var].instr != nullptr) {
return cast<SpirvVariable>(astDecls[var].instr);
}
const auto type = getTypeOrFnRetType(var);
const auto loc = var->getLocation();
const auto name = var->getName();
SpirvVariable *varInstr = spvBuilder.addModuleVar(
type, spv::StorageClass::Private, var->hasAttr<HLSLPreciseAttr>(),
var->hasAttr<HLSLNoInterpolationAttr>(), name, init, loc);
bool isAlias = false;
(void)getTypeAndCreateCounterForPotentialAliasVar(var, &isAlias);
varInstr->setContainsAliasComponent(isAlias);
astDecls[var].instr = varInstr;
createDebugGlobalVariable(varInstr, type, loc, name);
return varInstr;
}
SpirvVariable *DeclResultIdMapper::createResourceHeap(const VarDecl *var,
QualType ResourceType) {
QualType ResourceArrayType = astContext.getIncompleteArrayType(
ResourceType, clang::ArrayType::Normal, 0);
return createExternVar(var, ResourceArrayType);
}
SpirvVariable *DeclResultIdMapper::createExternVar(const VarDecl *var) {
return createExternVar(var, var->getType());
}
SpirvVariable *DeclResultIdMapper::createExternVar(const VarDecl *var,
QualType type) {
const bool isGroupShared = var->hasAttr<HLSLGroupSharedAttr>();
const bool isACSBuffer =
isAppendStructuredBuffer(type) || isConsumeStructuredBuffer(type);
const bool isRWSBuffer = isRWStructuredBuffer(type);
const auto storageClass = getStorageClassForExternVar(type, isGroupShared);
const auto rule = getLayoutRuleForExternVar(type, spirvOptions);
const auto loc = var->getLocation();
if (!isGroupShared && !isResourceType(type) &&
!isResourceOnlyStructure(type)) {
// We currently cannot support global structures that contain both resources
// and non-resources. That would require significant work in manipulating
// structure field decls, manipulating QualTypes, as well as inserting
// non-resources into the Globals cbuffer which changes offset decorations
// for it.
if (isStructureContainingMixOfResourcesAndNonResources(type)) {
emitError("global structures containing both resources and non-resources "
"are not supported",
loc);
return nullptr;
}
    // This is a stand-alone externally-visible non-resource-type variable.
    // Such variables should be grouped into the $Globals cbuffer. We create
    // that cbuffer and record all variables inside it upon seeing the first
    // such variable.
if (astDecls.count(var) == 0)
createGlobalsCBuffer(var);
auto *varInstr = astDecls[var].instr;
return varInstr ? cast<SpirvVariable>(varInstr) : nullptr;
}
if (isResourceOnlyStructure(type)) {
// We currently do not support global structures that contain buffers.
// Supporting global structures that contain buffers has two complications:
//
// 1- Buffers have the Uniform storage class, whereas Textures/Samplers have
// UniformConstant storage class. As a result, if a struct contains both
// textures and buffers, it is not clear what storage class should be used
// for the struct. Also legalization cannot deduce the proper storage class
// for struct members based on the structure's storage class.
//
// 2- Any kind of structured buffer has associated counters. The current DXC
// code is not written in a way to place associated counters inside a
// structure. Changing this behavior is non-trivial. There's also
// significant work to be done both in DXC (to properly generate binding
// numbers for the resource and its associated counters at correct offsets)
    // and in spirv-opt (to flatten such structures and modify the binding
// numbers accordingly).
if (isStructureContainingAnyKindOfBuffer(type)) {
emitError("global structures containing buffers are not supported", loc);
return nullptr;
}
needsFlatteningCompositeResources = true;
}
const auto name = var->getName();
SpirvVariable *varInstr = spvBuilder.addModuleVar(
type, storageClass, var->hasAttr<HLSLPreciseAttr>(),
var->hasAttr<HLSLNoInterpolationAttr>(), name, llvm::None, loc);
varInstr->setLayoutRule(rule);
// If this variable has [[vk::combinedImageSampler]] and/or
// [[vk::image_format("..")]] attributes, we have to keep the information in
// the SpirvContext and use it when we lower the QualType to SpirvType.
VkImageFeatures vkImgFeatures = {
var->getAttr<VKCombinedImageSamplerAttr>() != nullptr,
getSpvImageFormat(var->getAttr<VKImageFormatAttr>())};
if (vkImgFeatures.format != spv::ImageFormat::Unknown) {
// Legalization is needed to propagate the correct image type for
// instructions in addition to cases where the resource is assigned to
// another variable or function parameter
needsLegalization = true;
}
if (vkImgFeatures.isCombinedImageSampler ||
vkImgFeatures.format != spv::ImageFormat::Unknown) {
spvContext.registerVkImageFeaturesForSpvVariable(varInstr, vkImgFeatures);
}
if (const auto *recordType = type->getAs<RecordType>()) {
StringRef typeName = recordType->getDecl()->getName();
if (typeName.startswith("FeedbackTexture")) {
emitError("Texture resource type '%0' is not supported with -spirv", loc)
<< typeName;
return nullptr;
}
}
if (hlsl::IsHLSLResourceType(type)) {
if (!areFormatAndTypeCompatible(vkImgFeatures.format,
hlsl::GetHLSLResourceResultType(type))) {
emitError("The image format and the sampled type are not compatible.\n"
"For the table of compatible types, see "
"https://docs.vulkan.org/spec/latest/appendices/"
"spirvenv.html#spirvenv-format-type-matching.",
loc);
return nullptr;
}
}
astDecls[var] = createDeclSpirvInfo(varInstr);
createDebugGlobalVariable(varInstr, type, loc, name);
// Variables in Workgroup do not need descriptor decorations.
if (storageClass == spv::StorageClass::Workgroup)
return varInstr;
const auto *bindingAttr = var->getAttr<VKBindingAttr>();
resourceVars.emplace_back(varInstr, var, loc, getResourceBinding(var),
bindingAttr, var->getAttr<VKCounterBindingAttr>());
if (const auto *inputAttachment = var->getAttr<VKInputAttachmentIndexAttr>())
spvBuilder.decorateInputAttachmentIndex(varInstr,
inputAttachment->getIndex(), loc);
if (isACSBuffer) {
// For {Append|Consume}StructuredBuffer, we need to always create another
// variable for its associated counter.
createCounterVar(var, varInstr, /*isAlias=*/false);
} else if (isRWSBuffer) {
declRWSBuffers[var] = varInstr;
}
return varInstr;
}
SpirvInstruction *DeclResultIdMapper::createResultId(const VarDecl *var) {
assert(isExtResultIdType(var->getType()));
// Without initialization, we cannot generate the result id.
if (!var->hasInit()) {
emitError("Found uninitialized variable for result id.",
var->getLocation());
return nullptr;
}
SpirvInstruction *init = theEmitter.doExpr(var->getInit());
astDecls[var] = createDeclSpirvInfo(init);
return init;
}
SpirvInstruction *
DeclResultIdMapper::createOrUpdateStringVar(const VarDecl *var) {
assert(hlsl::IsStringType(var->getType()) ||
hlsl::IsStringLiteralType(var->getType()));
// If the string variable is not initialized to a string literal, we cannot
// generate an OpString for it.
if (!var->hasInit()) {
emitError("Found uninitialized string variable.", var->getLocation());
return nullptr;
}
const StringLiteral *stringLiteral =
dyn_cast<StringLiteral>(var->getInit()->IgnoreParenCasts());
SpirvString *init = spvBuilder.getString(stringLiteral->getString());
astDecls[var] = createDeclSpirvInfo(init);
return init;
}
SpirvVariable *DeclResultIdMapper::createStructOrStructArrayVarOfExplicitLayout(
const DeclContext *decl, llvm::ArrayRef<int> arraySize,
const ContextUsageKind usageKind, llvm::StringRef typeName,
llvm::StringRef varName) {
// cbuffers are translated into OpTypeStruct with Block decoration.
// tbuffers are translated into OpTypeStruct with BufferBlock decoration.
// Push constants are translated into OpTypeStruct with Block decoration.
//
// Both cbuffers and tbuffers have the SPIR-V Uniform storage class.
// Push constants have the SPIR-V PushConstant storage class.
const bool forCBuffer = usageKind == ContextUsageKind::CBuffer;
const bool forTBuffer = usageKind == ContextUsageKind::TBuffer;
const bool forGlobals = usageKind == ContextUsageKind::Globals;
const bool forPC = usageKind == ContextUsageKind::PushConstant;
const bool forShaderRecordNV =
usageKind == ContextUsageKind::ShaderRecordBufferNV;
const bool forShaderRecordEXT =
usageKind == ContextUsageKind::ShaderRecordBufferKHR;
const auto &declGroup = collectDeclsInDeclContext(decl);
// Collect the type and name for each field
llvm::SmallVector<HybridStructType::FieldInfo, 4> fields;
for (const auto *subDecl : declGroup) {
// The field can only be FieldDecl (for normal structs) or VarDecl (for
// HLSLBufferDecls).
assert(isa<VarDecl>(subDecl) || isa<FieldDecl>(subDecl));
const auto *declDecl = cast<DeclaratorDecl>(subDecl);
auto varType = declDecl->getType();
if (const auto *fieldVar = dyn_cast<VarDecl>(subDecl)) {
// Static variables are not part of the struct from a layout perspective.
// Thus, they should not be listed in the struct fields.
if (fieldVar->getStorageClass() == StorageClass::SC_Static) {
continue;
}
if (isResourceType(varType)) {
createExternVar(fieldVar);
continue;
}
}
// In case 'register(c#)' annotation is placed on a global variable.
const hlsl::RegisterAssignment *registerC =
forGlobals ? getRegisterCAssignment(declDecl) : nullptr;
llvm::Optional<BitfieldInfo> bitfieldInfo;
{
const FieldDecl *Field = dyn_cast<FieldDecl>(subDecl);
if (Field && Field->isBitField()) {
bitfieldInfo = BitfieldInfo();
bitfieldInfo->sizeInBits =
Field->getBitWidthValue(Field->getASTContext());
}
}
    // All fields are qualified with const, which would affect the debug name;
    // strip it here since we do not need it.
varType.removeLocalConst();
HybridStructType::FieldInfo info(
varType, declDecl->getName(),
/*vkoffset*/ declDecl->getAttr<VKOffsetAttr>(),
/*packoffset*/ getPackOffset(declDecl),
/*RegisterAssignment*/ registerC,
/*isPrecise*/ declDecl->hasAttr<HLSLPreciseAttr>(),
/*bitfield*/ bitfieldInfo);
fields.push_back(info);
}
// Get the type for the whole struct
// tbuffer/TextureBuffers are non-writable SSBOs.
const SpirvType *resultType = spvContext.getHybridStructType(
fields, typeName, /*isReadOnly*/ forTBuffer,
forTBuffer ? StructInterfaceType::StorageBuffer
: StructInterfaceType::UniformBuffer);
for (int size : arraySize) {
if (size != -1) {
resultType = spvContext.getArrayType(resultType, size,
/*ArrayStride*/ llvm::None);
} else {
resultType = spvContext.getRuntimeArrayType(resultType,
/*ArrayStride*/ llvm::None);
}
}
const auto sc = forPC ? spv::StorageClass::PushConstant
: forShaderRecordNV ? spv::StorageClass::ShaderRecordBufferNV
: forShaderRecordEXT
? spv::StorageClass::ShaderRecordBufferKHR
: spv::StorageClass::Uniform;
// Create the variable for the whole struct / struct array.
// The fields may be 'precise', but the structure itself is not.
SpirvVariable *var = spvBuilder.addModuleVar(
resultType, sc, /*isPrecise*/ false, /*isNoInterp*/ false, varName);
const SpirvLayoutRule layoutRule =
(forCBuffer || forGlobals)
? spirvOptions.cBufferLayoutRule
: (forTBuffer ? spirvOptions.tBufferLayoutRule
: spirvOptions.sBufferLayoutRule);
var->setHlslUserType(forCBuffer ? "cbuffer" : forTBuffer ? "tbuffer" : "");
var->setLayoutRule(layoutRule);
return var;
}
SpirvVariable *DeclResultIdMapper::createStructOrStructArrayVarOfExplicitLayout(
const DeclContext *decl, int arraySize, const ContextUsageKind usageKind,
llvm::StringRef typeName, llvm::StringRef varName) {
llvm::SmallVector<int, 1> arraySizes;
if (arraySize > 0)
arraySizes.push_back(arraySize);
return createStructOrStructArrayVarOfExplicitLayout(
decl, arraySizes, usageKind, typeName, varName);
}
void DeclResultIdMapper::createEnumConstant(const EnumConstantDecl *decl) {
const auto *valueDecl = dyn_cast<ValueDecl>(decl);
const auto enumConstant =
spvBuilder.getConstantInt(astContext.IntTy, decl->getInitVal());
SpirvVariable *varInstr = spvBuilder.addModuleVar(
astContext.IntTy, spv::StorageClass::Private, /*isPrecise*/ false, false,
decl->getName(), enumConstant, decl->getLocation());
astDecls[valueDecl] = createDeclSpirvInfo(varInstr);
}
SpirvVariable *DeclResultIdMapper::createCTBuffer(const HLSLBufferDecl *decl) {
// This function handles creation of cbuffer or tbuffer.
const auto usageKind =
decl->isCBuffer() ? ContextUsageKind::CBuffer : ContextUsageKind::TBuffer;
const std::string structName = "type." + decl->getName().str();
// The front-end does not allow arrays of cbuffer/tbuffer.
SpirvVariable *bufferVar = createStructOrStructArrayVarOfExplicitLayout(
decl, /*arraySize*/ 0, usageKind, structName, decl->getName());
  // We still register all VarDecls separately here. All the VarDecls are
  // mapped to the <result-id> of the buffer object, which means when querying
  // the <result-id> for a certain VarDecl, we need to do an extra
  // OpAccessChain.
int index = 0;
for (const auto *subDecl : decl->decls()) {
if (shouldSkipInStructLayout(subDecl))
continue;
// If subDecl is a variable with resource type, we already added a separate
// OpVariable for it in createStructOrStructArrayVarOfExplicitLayout().
const auto *varDecl = cast<VarDecl>(subDecl);
if (isResourceType(varDecl->getType()))
continue;
astDecls[varDecl] = createDeclSpirvInfo(bufferVar, index++);
}
  // If it does not contain a member with a non-resource type, we do not want
  // to set a dedicated binding number.
if (index != 0) {
resourceVars.emplace_back(
bufferVar, decl, decl->getLocation(), getResourceBinding(decl),
decl->getAttr<VKBindingAttr>(), decl->getAttr<VKCounterBindingAttr>());
}
if (!spirvOptions.debugInfoRich) {
return bufferVar;
}
auto *dbgGlobalVar = createDebugGlobalVariable(
bufferVar, QualType(), decl->getLocation(), decl->getName());
assert(dbgGlobalVar);
(void)dbgGlobalVar; // For NDEBUG builds.
auto *resultType = bufferVar->getResultType();
  // Depending on the requested layout (DX or VK), a constant buffer is either
  // a struct containing all the constant fields, or a pointer to that struct.
  // This is caused by the workaround we implemented to support FXC/DX layout.
  // See #3672 for more details.
assert(isa<SpirvPointerType>(resultType) ||
isa<HybridStructType>(resultType));
if (auto *ptr = dyn_cast<SpirvPointerType>(resultType))
resultType = ptr->getPointeeType();
  // Debug type lowering requires the HLSLBufferDecl, so update the
  // type <-> decl mapping.
spvContext.registerStructDeclForSpirvType(resultType, decl);
return bufferVar;
}
SpirvVariable *DeclResultIdMapper::createPushConstant(const VarDecl *decl) {
  // The front-end errors out if a non-struct-type push constant is used.
const QualType type = decl->getType();
const auto *recordType = type->getAs<RecordType>();
SpirvVariable *var = nullptr;
if (isConstantBuffer(type)) {
// Constant buffers already have Block decoration. The variable will need
// the PushConstant storage class.
// Create the variable for the whole struct / struct array.
// The fields may be 'precise', but the structure itself is not.
var = spvBuilder.addModuleVar(type, spv::StorageClass::PushConstant,
/*isPrecise*/ false,
/*isNoInterp*/ false, decl->getName());
const SpirvLayoutRule layoutRule = spirvOptions.sBufferLayoutRule;
var->setHlslUserType("");
var->setLayoutRule(layoutRule);
} else {
assert(recordType);
const std::string structName =
"type.PushConstant." + recordType->getDecl()->getName().str();
var = createStructOrStructArrayVarOfExplicitLayout(
recordType->getDecl(), /*arraySize*/ 0, ContextUsageKind::PushConstant,
structName, decl->getName());
}
// Register the VarDecl
astDecls[decl] = createDeclSpirvInfo(var);
// Do not push this variable into resourceVars since it does not need
// descriptor set.
return var;
}
SpirvVariable *
DeclResultIdMapper::createShaderRecordBuffer(const VarDecl *decl,
ContextUsageKind kind) {
const QualType type = decl->getType();
const auto *recordType =
hlsl::GetHLSLResourceResultType(type)->getAs<RecordType>();
assert(recordType);
assert(kind == ContextUsageKind::ShaderRecordBufferKHR ||
kind == ContextUsageKind::ShaderRecordBufferNV);
SpirvVariable *var = nullptr;
if (isConstantBuffer(type)) {
// Constant buffers already have Block decoration. The variable will need
// the appropriate storage class.
const auto sc = kind == ContextUsageKind::ShaderRecordBufferNV
? spv::StorageClass::ShaderRecordBufferNV
: spv::StorageClass::ShaderRecordBufferKHR;
// Create the variable for the whole struct / struct array.
// The fields may be 'precise', but the structure itself is not.
var = spvBuilder.addModuleVar(type, sc,
/*isPrecise*/ false,
/*isNoInterp*/ false, decl->getName());
const SpirvLayoutRule layoutRule = spirvOptions.sBufferLayoutRule;
var->setHlslUserType("");
var->setLayoutRule(layoutRule);
} else {
const auto typeName = kind == ContextUsageKind::ShaderRecordBufferKHR
? "type.ShaderRecordBufferKHR."
: "type.ShaderRecordBufferNV.";
const std::string structName =
typeName + recordType->getDecl()->getName().str();
var = createStructOrStructArrayVarOfExplicitLayout(
recordType->getDecl(), /*arraySize*/ 0, kind, structName,
decl->getName());
}
// Register the VarDecl
astDecls[decl] = createDeclSpirvInfo(var);
// Do not push this variable into resourceVars since it does not need
// descriptor set.
return var;
}
SpirvVariable *
DeclResultIdMapper::createShaderRecordBuffer(const HLSLBufferDecl *decl,
ContextUsageKind kind) {
assert(kind == ContextUsageKind::ShaderRecordBufferKHR ||
kind == ContextUsageKind::ShaderRecordBufferNV);
const auto typeName = kind == ContextUsageKind::ShaderRecordBufferKHR
? "type.ShaderRecordBufferKHR."
: "type.ShaderRecordBufferNV.";
const std::string structName = typeName + decl->getName().str();
// The front-end does not allow arrays of cbuffer/tbuffer.
SpirvVariable *bufferVar = createStructOrStructArrayVarOfExplicitLayout(
decl, /*arraySize*/ 0, kind, structName, decl->getName());
  // We still register all VarDecls separately here. All the VarDecls are
// mapped to the <result-id> of the buffer object, which means when
// querying the <result-id> for a certain VarDecl, we need to do an extra
// OpAccessChain.
int index = 0;
for (const auto *subDecl : decl->decls()) {
if (shouldSkipInStructLayout(subDecl))
continue;
// If subDecl is a variable with resource type, we already added a separate
// OpVariable for it in createStructOrStructArrayVarOfExplicitLayout().
const auto *varDecl = cast<VarDecl>(subDecl);
if (isResourceType(varDecl->getType()))
continue;
astDecls[varDecl] = createDeclSpirvInfo(bufferVar, index++);
}
return bufferVar;
}
void DeclResultIdMapper::recordsSpirvTypeAlias(const Decl *decl) {
auto *typedefDecl = dyn_cast<TypedefNameDecl>(decl);
if (!typedefDecl)
return;
if (!typedefDecl->hasAttr<VKCapabilityExtAttr>() &&
!typedefDecl->hasAttr<VKExtensionExtAttr>())
return;
typeAliasesWithAttributes.push_back(typedefDecl);
}
void DeclResultIdMapper::createGlobalsCBuffer(const VarDecl *var) {
if (astDecls.count(var) != 0)
return;
const auto *context = var->getTranslationUnitDecl();
SpirvVariable *globals = createStructOrStructArrayVarOfExplicitLayout(
context, /*arraySize*/ 0, ContextUsageKind::Globals, "type.$Globals",
"$Globals");
uint32_t index = 0;
for (const auto *decl : collectDeclsInDeclContext(context)) {
if (const auto *varDecl = dyn_cast<VarDecl>(decl)) {
if (!spirvOptions.noWarnIgnoredFeatures) {
        if (const auto *init = varDecl->getInit())
          emitWarning(
              "variable '%0' will be placed in $Globals so initializer ignored",
              init->getExprLoc())
              << varDecl->getName() << init->getSourceRange();
}
if (const auto *attr = varDecl->getAttr<VKBindingAttr>()) {
emitError("variable '%0' will be placed in $Globals so cannot have "
"vk::binding attribute",
attr->getLocation())
            << varDecl->getName();
return;
}
// If subDecl is a variable with resource type, we already added a
// separate OpVariable for it in
// createStructOrStructArrayVarOfExplicitLayout().
if (isResourceType(varDecl->getType()))
continue;
astDecls[varDecl] = createDeclSpirvInfo(globals, index++);
}
}
  // If it does not contain a member with non-resource type, we do not want to
// set a dedicated binding number.
if (index != 0) {
resourceVars.emplace_back(globals, /*decl*/ nullptr, SourceLocation(),
nullptr, nullptr, nullptr, /*isCounterVar*/ false,
/*isGlobalsCBuffer*/ true);
}
}
SpirvFunction *DeclResultIdMapper::getOrRegisterFn(const FunctionDecl *fn) {
// Return it if it's already been created.
auto it = astFunctionDecls.find(fn);
if (it != astFunctionDecls.end()) {
return it->second;
}
bool isAlias = false;
(void)getTypeAndCreateCounterForPotentialAliasVar(fn, &isAlias);
const bool isPrecise = fn->hasAttr<HLSLPreciseAttr>();
const bool isNoInline = fn->hasAttr<NoInlineAttr>();
// Note: we do not need to worry about function parameter types at this point
// as this is used when function declarations are seen. When function
// definition is seen, the parameter types will be set properly and take into
// account whether the function is a member function of a class/struct (in
  // which case a 'this' parameter is added at the beginning).
SpirvFunction *spirvFunction = spvBuilder.createSpirvFunction(
fn->getReturnType(), fn->getLocation(),
getFunctionOrOperatorName(fn, true), isPrecise, isNoInline);
if (fn->getAttr<HLSLExportAttr>()) {
spvBuilder.decorateLinkage(nullptr, spirvFunction, fn->getName(),
spv::LinkageType::Export, fn->getLocation());
}
// No need to dereference to get the pointer. Function returns that are
// stand-alone aliases are already pointers to values. All other cases should
// be normal rvalues.
if (!isAlias || !isAKindOfStructuredOrByteBuffer(fn->getReturnType()))
spirvFunction->setRValue();
spirvFunction->setConstainsAliasComponent(isAlias);
astFunctionDecls[fn] = spirvFunction;
return spirvFunction;
}
const CounterIdAliasPair *DeclResultIdMapper::getCounterIdAliasPair(
const DeclaratorDecl *decl, const llvm::SmallVector<uint32_t, 4> *indices) {
if (!decl)
return nullptr;
if (indices) {
// Indices are provided. Walk through the fields of the decl.
const auto counter = fieldCounterVars.find(decl);
if (counter != fieldCounterVars.end())
return counter->second.get(*indices);
} else {
// No indices. Check the stand-alone entities. If not found,
// likely a deferred RWStructuredBuffer counter, so try
// creating it now.
auto counter = counterVars.find(decl);
if (counter == counterVars.end()) {
auto declInstr = declRWSBuffers[decl];
if (declInstr) {
createCounterVar(decl, declInstr, /*isAlias*/ false);
counter = counterVars.find(decl);
}
}
if (counter != counterVars.end())
return &counter->second;
}
return nullptr;
}
const CounterIdAliasPair *
DeclResultIdMapper::createOrGetCounterIdAliasPair(const DeclaratorDecl *decl) {
auto counterPair = getCounterIdAliasPair(decl);
if (counterPair)
return counterPair;
if (!decl)
return nullptr;
// If deferred RWStructuredBuffer, try creating the counter now
auto declInstr = declRWSBuffers[decl];
if (declInstr) {
createCounterVar(decl, declInstr, /*isAlias*/ false);
auto counter = counterVars.find(decl);
assert(counter != counterVars.end() && "counter not found");
return &counter->second;
}
return nullptr;
}
const CounterVarFields *
DeclResultIdMapper::getCounterVarFields(const DeclaratorDecl *decl) {
if (!decl)
return nullptr;
const auto found = fieldCounterVars.find(decl);
if (found != fieldCounterVars.end())
return &found->second;
return nullptr;
}
void DeclResultIdMapper::registerSpecConstant(const VarDecl *decl,
SpirvInstruction *specConstant) {
specConstant->setRValue();
astDecls[decl] = createDeclSpirvInfo(specConstant);
}
void DeclResultIdMapper::createCounterVar(
const DeclaratorDecl *decl, SpirvInstruction *declInstr, bool isAlias,
const llvm::SmallVector<uint32_t, 4> *indices) {
std::string counterName = "counter.var." + decl->getName().str();
if (indices) {
// Append field indices to the name
for (const auto index : *indices)
counterName += "." + std::to_string(index);
}
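  // For example (illustrative): a stand-alone RWStructuredBuffer named 'gBuf'
  // gets a counter named "counter.var.gBuf"; a field counter reached through
  // indices {2, 0} would be named "counter.var.gBuf.2.0".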
const SpirvType *counterType = spvContext.getACSBufferCounterType();
QualType declType = decl->getType();
if (declType->isArrayType()) {
    // Vulkan does not support multi-dimensional arrays of resources, so we
    // assume the array is a single-dimensional array.
assert(!declType->getArrayElementTypeNoTypeQual()->isArrayType());
uint32_t arrayStride = 4;
if (const auto *constArrayType =
astContext.getAsConstantArrayType(declType)) {
counterType = spvContext.getArrayType(
counterType, constArrayType->getSize().getZExtValue(), arrayStride);
} else {
assert(declType->isIncompleteArrayType());
counterType = spvContext.getRuntimeArrayType(counterType, arrayStride);
}
} else if (isResourceDescriptorHeap(decl->getType()) ||
isSamplerDescriptorHeap(decl->getType())) {
counterType =
spvContext.getRuntimeArrayType(counterType, /* arrayStride= */ 4);
}
// {RW|Append|Consume}StructuredBuffer are all in Uniform storage class.
// Alias counter variables should be created into the Private storage class.
const spv::StorageClass sc =
isAlias ? spv::StorageClass::Private : spv::StorageClass::Uniform;
if (isAlias) {
// Apply an extra level of pointer for alias counter variable
counterType =
spvContext.getPointerType(counterType, spv::StorageClass::Uniform);
}
  SpirvVariable *counterInstr = spvBuilder.addModuleVar(
      counterType, sc, /*isPrecise*/ false, /*isNoInterp*/ false, counterName);
if (!isAlias) {
    // Non-alias counter variables should be put into resourceVars so that
    // descriptors can be allocated for them.
resourceVars.emplace_back(counterInstr, decl, decl->getLocation(),
getResourceBinding(decl),
decl->getAttr<VKBindingAttr>(),
decl->getAttr<VKCounterBindingAttr>(), true);
assert(declInstr);
spvBuilder.decorateCounterBuffer(declInstr, counterInstr,
decl->getLocation());
}
if (indices)
fieldCounterVars[decl].append(*indices, counterInstr);
else
counterVars[decl] = {counterInstr, isAlias};
}
void DeclResultIdMapper::createFieldCounterVars(
const DeclaratorDecl *rootDecl, const DeclaratorDecl *decl,
llvm::SmallVector<uint32_t, 4> *indices) {
const QualType type = getTypeOrFnRetType(decl);
const auto *recordType = type->getAs<RecordType>();
assert(recordType);
const auto *recordDecl = recordType->getDecl();
for (const auto *field : recordDecl->fields()) {
// Build up the index chain
indices->push_back(getNumBaseClasses(type) + field->getFieldIndex());
const QualType fieldType = field->getType();
if (isRWAppendConsumeSBuffer(fieldType))
      createCounterVar(rootDecl, /*declInstr=*/nullptr, /*isAlias=*/true,
                       indices);
else if (fieldType->isStructureType() &&
!hlsl::IsHLSLResourceType(fieldType))
// Go recursively into all nested structs
createFieldCounterVars(rootDecl, field, indices);
indices->pop_back();
}
}
std::vector<SpirvVariable *>
DeclResultIdMapper::collectStageVars(SpirvFunction *entryPoint) const {
std::vector<SpirvVariable *> vars;
for (auto var : glPerVertex.getStageInVars())
vars.push_back(var);
for (auto var : glPerVertex.getStageOutVars())
vars.push_back(var);
for (const auto &var : stageVars) {
    // We must collect stage variables that are included in entryPoint and
    // stage variables that are not included in any specific entry point,
    // i.e., var.getEntryPoint() is nullptr. Note that stage variables without
    // any specific entry point are common stage variables among all entry
    // points.
if (var.getEntryPoint() && var.getEntryPoint() != entryPoint)
continue;
auto *instr = var.getSpirvInstr();
if (instr->getStorageClass() == spv::StorageClass::Private)
continue;
vars.push_back(instr);
}
return vars;
}
namespace {
/// A class for managing stage input/output locations to avoid duplicate uses of
/// the same location.
class LocationSet {
public:
/// Maximum number of indices supported
const static uint32_t kMaxIndex = 2;
// Creates an empty set.
LocationSet() {
for (uint32_t i = 0; i < kMaxIndex; ++i) {
// Default size. 64 should cover most cases without having to resize.
usedLocations[i].resize(64);
nextAvailableLocation[i] = 0;
}
}
/// Marks a given location as used.
void useLocation(uint32_t loc, uint32_t index = 0) {
assert(index < kMaxIndex);
auto &set = usedLocations[index];
if (loc >= set.size()) {
set.resize(std::max<size_t>(loc + 1, set.size() * 2));
}
set.set(loc);
nextAvailableLocation[index] =
std::max(loc + 1, nextAvailableLocation[index]);
}
// Find the first range of size |count| of unused locations,
// and marks them as used.
// Returns the first index of this range.
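  // For example (illustrative): if locations {0, 1, 4} are in use,
  // useNextNLocations(2) returns 2 and marks locations 2 and 3 as used.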
  uint32_t useNextNLocations(uint32_t count, uint32_t index = 0) {
auto res = findUnusedRange(index, count);
auto &locations = usedLocations[index];
    // No hole large enough was found; grow the set to make room.
if (res == std::nullopt) {
const uint32_t spaceLeft =
locations.size() - nextAvailableLocation[index];
assert(spaceLeft < count && "There is a bug.");
const uint32_t requiredAlloc = count - spaceLeft;
locations.resize(locations.size() + requiredAlloc);
res = nextAvailableLocation[index];
}
for (uint32_t i = res.value(); i < res.value() + count; i++) {
locations.set(i);
}
nextAvailableLocation[index] =
std::max(res.value() + count, nextAvailableLocation[index]);
return res.value();
}
/// Returns true if the given location number is already used.
bool isLocationUsed(uint32_t loc, uint32_t index = 0) {
assert(index < kMaxIndex);
if (loc >= usedLocations[index].size())
return false;
return usedLocations[index][loc];
}
private:
  // Find the first unused range of size |size| in the given set.
  // If the set contains such a range, returns the first usable index.
  // Otherwise, nullopt is returned.
std::optional<uint32_t> findUnusedRange(uint32_t index, uint32_t size) {
if (size == 0) {
return 0;
}
assert(index < kMaxIndex);
const auto &locations = usedLocations[index];
uint32_t required_size = size;
uint32_t start = 0;
for (uint32_t i = 0; i < locations.size() && required_size > 0; i++) {
if (!locations[i]) {
--required_size;
continue;
}
required_size = size;
start = i + 1;
}
return required_size == 0 ? std::optional(start) : std::nullopt;
}
// The sets to remember used locations. A set bit means the location is used.
/// All previously used locations
llvm::SmallBitVector usedLocations[kMaxIndex];
  // One past the highest used location in each usedLocations vector.
uint32_t nextAvailableLocation[kMaxIndex];
};
} // namespace
/// A class for managing resource bindings to avoid duplicate uses of the same
/// set and binding number.
class DeclResultIdMapper::BindingSet {
public:
/// Uses the given set and binding number. Returns false if the binding number
/// was already occupied in the set, and returns true otherwise.
bool useBinding(uint32_t binding, uint32_t set) {
bool inserted = false;
std::tie(std::ignore, inserted) = usedBindings[set].insert(binding);
return inserted;
}
/// Uses the next available binding number in |set|. If more than one binding
/// number is to be occupied, it finds the next available chunk that can fit
/// |numBindingsToUse| in the |set|.
uint32_t useNextBinding(uint32_t set, uint32_t numBindingsToUse = 1,
uint32_t bindingShift = 0) {
uint32_t bindingNoStart =
getNextBindingChunk(set, numBindingsToUse, bindingShift);
auto &binding = usedBindings[set];
for (uint32_t i = 0; i < numBindingsToUse; ++i)
binding.insert(bindingNoStart + i);
return bindingNoStart;
}
/// Returns the first available binding number in the |set| for which |n|
/// consecutive binding numbers are unused starting at |bindingShift|.
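  /// For example (illustrative): with bindings {0, 1, 5} used in |set|,
  /// getNextBindingChunk(set, /*n=*/2, /*bindingShift=*/0) returns 2, since
  /// bindings 2 and 3 form the first unused chunk of size 2.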
uint32_t getNextBindingChunk(uint32_t set, uint32_t n,
uint32_t bindingShift) {
auto &existingBindings = usedBindings[set];
    // There were no bindings in this set; start at bindingShift.
if (existingBindings.empty())
return bindingShift;
    // Check whether the chunk of |n| binding numbers can be fitted at the
    // very beginning of the list (starting at bindingShift in the current
    // set).
uint32_t curBinding = *existingBindings.begin();
if (curBinding >= (n + bindingShift))
return bindingShift;
auto iter = std::next(existingBindings.begin());
while (iter != existingBindings.end()) {
// There exists a next binding number that is used. Check to see if the
// gap between current binding number and next binding number is large
// enough to accommodate |n|.
uint32_t nextBinding = *iter;
if ((bindingShift > 0) && (curBinding < (bindingShift - 1)))
curBinding = bindingShift - 1;
if (curBinding < nextBinding && n <= nextBinding - curBinding - 1)
return curBinding + 1;
curBinding = nextBinding;
// Peek at the next binding that has already been used (if any).
++iter;
}
// |curBinding| was the last binding that was used in this set. The next
// chunk of |n| bindings can start at |curBinding|+1.
return std::max(curBinding + 1, bindingShift);
}
private:
  /// set number -> set of used binding numbers
llvm::DenseMap<uint32_t, std::set<uint32_t>> usedBindings;
};
bool DeclResultIdMapper::checkSemanticDuplication(bool forInput) {
// Mapping from entry points to the corresponding set of semantics.
llvm::SmallDenseMap<SpirvFunction *, llvm::StringSet<>>
seenSemanticsForEntryPoints;
bool success = true;
for (const auto &var : stageVars) {
auto s = var.getSemanticStr();
if (s.empty()) {
      // We translate WaveGetLaneCount(), WaveGetLaneIndex(), and 'payload'
      // param block declarations into builtin variables. Those variables are
      // inserted into the normal stage IO processing pipeline, but with empty
      // semantic strings.
assert(var.isSpirvBuitin());
continue;
}
if (forInput && var.getSigPoint()->IsInput()) {
bool insertionSuccess = insertSeenSemanticsForEntryPointIfNotExist(
&seenSemanticsForEntryPoints, var.getEntryPoint(), s);
if (!insertionSuccess) {
emitError("input semantic '%0' used more than once",
var.getSemanticInfo().loc)
<< s;
success = false;
}
} else if (!forInput && var.getSigPoint()->IsOutput()) {
bool insertionSuccess = insertSeenSemanticsForEntryPointIfNotExist(
&seenSemanticsForEntryPoints, var.getEntryPoint(), s);
if (!insertionSuccess) {
emitError("output semantic '%0' used more than once",
var.getSemanticInfo().loc)
<< s;
success = false;
}
}
}
return success;
}
bool DeclResultIdMapper::isDuplicatedStageVarLocation(
llvm::DenseSet<StageVariableLocationInfo, StageVariableLocationInfo>
*stageVariableLocationInfo,
const StageVar &var, uint32_t location, uint32_t index) {
if (!stageVariableLocationInfo
->insert({var.getEntryPoint(),
var.getSpirvInstr()->getStorageClass(), location, index})
.second) {
emitError("Multiple stage variables have a duplicated pair of "
"location and index at %0 / %1",
var.getSpirvInstr()->getSourceLocation())
<< location << index;
return false;
}
return true;
}
bool DeclResultIdMapper::assignLocations(
const std::vector<const StageVar *> &vars,
llvm::function_ref<uint32_t(uint32_t)> nextLocs,
llvm::DenseSet<StageVariableLocationInfo, StageVariableLocationInfo>
*stageVariableLocationInfo) {
for (const auto *var : vars) {
auto locCount = var->getLocationCount();
uint32_t location = nextLocs(locCount);
spvBuilder.decorateLocation(var->getSpirvInstr(), location);
if (!isDuplicatedStageVarLocation(stageVariableLocationInfo, *var, location,
0)) {
return false;
}
}
return true;
}
bool DeclResultIdMapper::finalizeStageIOLocationsForASingleEntryPoint(
bool forInput, ArrayRef<StageVar> functionStageVars) {
// Returns false if the given StageVar is an input/output variable without
// explicit location assignment. Otherwise, returns true.
const auto locAssigned = [forInput, this](const StageVar &v) {
if (forInput == isInputStorageClass(v)) {
// No need to assign location for builtins. Treat as assigned.
return v.isSpirvBuitin() || v.hasLocOrBuiltinDecorateAttr() ||
v.getLocationAttr() != nullptr;
}
// For the ones we don't care, treat as assigned.
return true;
};
/// Set of locations of assigned stage variables used to correctly report
/// duplicated stage variable locations.
llvm::DenseSet<StageVariableLocationInfo, StageVariableLocationInfo>
stageVariableLocationInfo;
  // If explicit locations are specified for all input/output variables, use
  // them instead of assigning locations ourselves.
if (std::all_of(functionStageVars.begin(), functionStageVars.end(),
locAssigned)) {
LocationSet locSet;
bool noError = true;
for (const auto &var : functionStageVars) {
// Skip builtins & those stage variables we are not handling for this call
if (var.isSpirvBuitin() || var.hasLocOrBuiltinDecorateAttr() ||
forInput != isInputStorageClass(var)) {
continue;
}
const auto *attr = var.getLocationAttr();
const auto loc = attr->getNumber();
const auto locCount = var.getLocationCount();
const auto attrLoc = attr->getLocation(); // Attr source code location
const auto idx = var.getIndexAttr() ? var.getIndexAttr()->getNumber() : 0;
// Make sure the same location is not assigned more than once
for (uint32_t l = loc; l < loc + locCount; ++l) {
if (locSet.isLocationUsed(l, idx)) {
emitError("stage %select{output|input}0 location #%1 already "
"consumed by semantic '%2'",
attrLoc)
<< forInput << l << functionStageVars[idx].getSemanticStr();
noError = false;
}
locSet.useLocation(l, idx);
}
spvBuilder.decorateLocation(var.getSpirvInstr(), loc);
if (var.getIndexAttr())
spvBuilder.decorateIndex(var.getSpirvInstr(), idx,
var.getSemanticInfo().loc);
if (!isDuplicatedStageVarLocation(&stageVariableLocationInfo, var, loc,
idx)) {
return false;
}
}
return noError;
}
std::vector<const StageVar *> vars;
LocationSet locSet;
for (const auto &var : functionStageVars) {
if (var.isSpirvBuitin() || var.hasLocOrBuiltinDecorateAttr() ||
forInput != isInputStorageClass(var)) {
continue;
}
if (var.getLocationAttr()) {
// We have checked that not all of the stage variables have explicit
// location assignment.
emitError("partial explicit stage %select{output|input}0 location "
"assignment via vk::location(X) unsupported",
{})
<< forInput;
return false;
}
const auto &semaInfo = var.getSemanticInfo();
    // Special rules apply to SV_Target: the location number comes from the
    // semantic string index.
if (semaInfo.isTarget()) {
spvBuilder.decorateLocation(var.getSpirvInstr(), semaInfo.index);
locSet.useLocation(semaInfo.index);
if (!isDuplicatedStageVarLocation(&stageVariableLocationInfo, var,
semaInfo.index, 0)) {
return false;
}
} else {
vars.push_back(&var);
}
}
if (vars.empty())
return true;
auto nextLocs = [&locSet](uint32_t locCount) {
return locSet.useNextNLocations(locCount);
};
// If alphabetical ordering was requested, sort by semantic string.
if (spirvOptions.stageIoOrder == "alpha") {
// Sort stage input/output variables alphabetically
std::stable_sort(vars.begin(), vars.end(),
[](const StageVar *a, const StageVar *b) {
return a->getSemanticStr() < b->getSemanticStr();
});
return assignLocations(vars, nextLocs, &stageVariableLocationInfo);
}
  // Pack signature if it is enabled. Vertex shader input and pixel
  // shader output are special. We have to preserve the given signature.
auto sigPointKind = vars[0]->getSigPoint()->GetKind();
if (spirvOptions.signaturePacking &&
sigPointKind != hlsl::SigPoint::Kind::VSIn &&
sigPointKind != hlsl::SigPoint::Kind::PSOut) {
return packSignature(spvBuilder, vars, nextLocs, forInput);
}
  // Since HS includes 2 sets of outputs (patch-constant output and
  // OutputPatch), running into location mismatches between HS and DS is very
  // likely. To avoid such mismatches, use alphabetical ordering.
if ((!forInput && spvContext.isHS()) || (forInput && spvContext.isDS())) {
// Sort stage input/output variables alphabetically
std::stable_sort(vars.begin(), vars.end(),
[](const StageVar *a, const StageVar *b) {
return a->getSemanticStr() < b->getSemanticStr();
});
}
return assignLocations(vars, nextLocs, &stageVariableLocationInfo);
}
llvm::DenseMap<const SpirvFunction *, SmallVector<StageVar, 8>>
DeclResultIdMapper::getStageVarsPerFunction() {
llvm::DenseMap<const SpirvFunction *, SmallVector<StageVar, 8>> result;
for (const auto &var : stageVars) {
result[var.getEntryPoint()].push_back(var);
}
return result;
}
bool DeclResultIdMapper::finalizeStageIOLocations(bool forInput) {
if (!checkSemanticDuplication(forInput))
return false;
auto stageVarPerFunction = getStageVarsPerFunction();
for (const auto &functionStageVars : stageVarPerFunction) {
if (!finalizeStageIOLocationsForASingleEntryPoint(
forInput, functionStageVars.getSecond())) {
return false;
}
}
return true;
}
namespace {
/// A class for maintaining the binding number shift requested for descriptor
/// sets.
class BindingShiftMapper {
public:
explicit BindingShiftMapper(const llvm::SmallVectorImpl<int32_t> &shifts)
: masterShift(0) {
assert(shifts.size() % 2 == 0);
if (shifts.size() == 2 && shifts[1] == -1) {
masterShift = shifts[0];
} else {
for (uint32_t i = 0; i < shifts.size(); i += 2)
perSetShift[shifts[i + 1]] = shifts[i];
}
}
/// Returns the shift amount for the given set.
int32_t getShiftForSet(int32_t set) const {
const auto found = perSetShift.find(set);
if (found != perSetShift.end())
return found->second;
return masterShift;
}
private:
  int32_t masterShift; /// Shift amount applied to all sets.
llvm::DenseMap<int32_t, int32_t> perSetShift;
};
/// A class for maintaining the mapping from source code register attributes to
/// descriptor set and number settings.
class RegisterBindingMapper {
public:
/// Takes in the relation between register attributes and descriptor settings.
/// Each relation is represented by four strings:
/// <register-type-number> <space> <descriptor-binding> <set>
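  /// For example (illustrative): "-fvk-bind-register t3 1 5 0" arrives here
  /// as {"t3", "1", "5", "0"} and maps register t3 in space 1 to descriptor
  /// set 0, binding 5.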
bool takeInRelation(const std::vector<std::string> &relation,
std::string *error) {
assert(relation.size() % 4 == 0);
mapping.clear();
for (uint32_t i = 0; i < relation.size(); i += 4) {
int32_t spaceNo = -1, setNo = -1, bindNo = -1;
if (StringRef(relation[i + 1]).getAsInteger(10, spaceNo) || spaceNo < 0) {
*error = "space number: " + relation[i + 1];
return false;
}
if (StringRef(relation[i + 2]).getAsInteger(10, bindNo) || bindNo < 0) {
*error = "binding number: " + relation[i + 2];
return false;
}
if (StringRef(relation[i + 3]).getAsInteger(10, setNo) || setNo < 0) {
*error = "set number: " + relation[i + 3];
return false;
}
mapping[relation[i + 1] + relation[i]] = std::make_pair(setNo, bindNo);
}
return true;
}
/// Returns true and set the correct set and binding number if we can find a
/// descriptor setting for the given register. False otherwise.
bool getSetBinding(const hlsl::RegisterAssignment *regAttr,
uint32_t defaultSpace, int *setNo, int *bindNo) const {
std::ostringstream iss;
iss << regAttr->RegisterSpace.getValueOr(defaultSpace)
<< regAttr->RegisterType << regAttr->RegisterNumber;
auto found = mapping.find(iss.str());
if (found != mapping.end()) {
*setNo = found->second.first;
*bindNo = found->second.second;
return true;
}
return false;
}
private:
llvm::StringMap<std::pair<int, int>> mapping;
};
} // namespace
bool DeclResultIdMapper::decorateResourceBindings() {
  // For normal resources, we support 4 approaches to setting binding numbers:
// - m1: [[vk::binding(...)]]
// - m2: :register(xX, spaceY)
// - m3: None
// - m4: :register(spaceY)
//
// For associated counters, we support 2 approaches:
  // - c1: [[vk::counter_binding(...)]]
// - c2: None
//
// In combination, we need to handle 12 cases:
  // - 4 cases for normal resources (m1, m2, m3, m4)
// - 8 cases for associated counters (mX * cY)
//
// In the following order:
// - m1, mX * c1
// - m2
// - m3, m4, mX * c2
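  //
  // For example (illustrative HLSL):
  //   [[vk::binding(2, 1)]] Texture2D t0;  // m1
  //   Texture2D t1 : register(t3, space1); // m2
  //   Texture2D t2;                        // m3
  //   Texture2D t3 : register(space2);     // m4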
// The "-auto-binding-space" command line option can be used to specify a
// certain space as default. UINT_MAX means the user has not provided this
// option. If not provided, the SPIR-V backend uses space "0" as default.
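  // For example (illustrative): with "-auto-binding-space 3", a resource
  // declared without any space ends up in descriptor set 3.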
auto defaultSpaceOpt =
theEmitter.getCompilerInstance().getCodeGenOpts().HLSLDefaultSpace;
uint32_t defaultSpace = (defaultSpaceOpt == UINT_MAX) ? 0 : defaultSpaceOpt;
const bool bindGlobals = !spirvOptions.bindGlobals.empty();
int32_t globalsBindNo = -1, globalsSetNo = -1;
if (bindGlobals) {
assert(spirvOptions.bindGlobals.size() == 2);
if (StringRef(spirvOptions.bindGlobals[0])
.getAsInteger(10, globalsBindNo) ||
globalsBindNo < 0) {
emitError("invalid -fvk-bind-globals binding number: %0", {})
<< spirvOptions.bindGlobals[0];
return false;
}
if (StringRef(spirvOptions.bindGlobals[1]).getAsInteger(10, globalsSetNo) ||
globalsSetNo < 0) {
emitError("invalid -fvk-bind-globals set number: %0", {})
<< spirvOptions.bindGlobals[1];
return false;
}
}
// Special handling of -fvk-bind-register, which requires
  // * All resources are annotated with :register() in the source code
// * -fvk-bind-register is specified for every resource
if (!spirvOptions.bindRegister.empty()) {
RegisterBindingMapper bindingMapper;
std::string error;
if (!bindingMapper.takeInRelation(spirvOptions.bindRegister, &error)) {
emitError("invalid -fvk-bind-register %0", {}) << error;
return false;
}
for (const auto &var : resourceVars)
if (const auto *regAttr = var.getRegister()) {
if (var.isCounter()) {
emitError("-fvk-bind-register for RW/Append/Consume StructuredBuffer "
"unimplemented",
var.getSourceLocation());
} else {
int setNo = 0, bindNo = 0;
if (!bindingMapper.getSetBinding(regAttr, defaultSpace, &setNo,
&bindNo)) {
emitError("missing -fvk-bind-register for resource",
var.getSourceLocation());
return false;
}
spvBuilder.decorateDSetBinding(var.getSpirvInstr(), setNo, bindNo);
}
} else if (var.isGlobalsBuffer()) {
if (!bindGlobals) {
emitError("-fvk-bind-register requires Globals buffer to be bound "
"with -fvk-bind-globals",
var.getSourceLocation());
return false;
}
spvBuilder.decorateDSetBinding(var.getSpirvInstr(), globalsSetNo,
globalsBindNo);
} else {
emitError(
"-fvk-bind-register requires register annotations on all resources",
var.getSourceLocation());
return false;
}
return true;
}
BindingSet bindingSet;
  // Decorates the given resource variable of the given category with set
  // number setNo and binding number bindingNo. Ignores overlaps.
const auto tryToDecorate = [this, &bindingSet](const ResourceVar &var,
const uint32_t setNo,
const uint32_t bindingNo) {
    // By default we use one binding number per resource, and an array of
    // resources also gets only one binding number. However, for arrays of
    // resources (e.g. an array of textures), DX uses one binding number per
    // array element. We can match this behavior via a command line option.
uint32_t numBindingsToUse = 1;
if (spirvOptions.flattenResourceArrays || needsFlatteningCompositeResources)
numBindingsToUse = getNumBindingsUsedByResourceType(
var.getSpirvInstr()->getAstResultType());
for (uint32_t i = 0; i < numBindingsToUse; ++i) {
bool success = bindingSet.useBinding(bindingNo + i, setNo);
// We will not emit an error if we find a set/binding overlap because it
// is possible that the optimizer optimizes away a resource which resolves
// the overlap.
(void)success;
}
// No need to decorate multiple binding numbers for arrays. It will be done
// by legalization/optimization.
spvBuilder.decorateDSetBinding(var.getSpirvInstr(), setNo, bindingNo);
};
for (const auto &var : resourceVars) {
if (var.isCounter()) {
if (const auto *vkCBinding = var.getCounterBinding()) {
// Process mX * c1
uint32_t set = defaultSpace;
if (const auto *vkBinding = var.getBinding())
set = getVkBindingAttrSet(vkBinding, defaultSpace);
else if (const auto *reg = var.getRegister())
set = reg->RegisterSpace.getValueOr(defaultSpace);
tryToDecorate(var, set, vkCBinding->getBinding());
}
} else {
if (const auto *vkBinding = var.getBinding()) {
// Process m1
tryToDecorate(var, getVkBindingAttrSet(vkBinding, defaultSpace),
vkBinding->getBinding());
}
}
}
BindingShiftMapper bShiftMapper(spirvOptions.bShift);
BindingShiftMapper tShiftMapper(spirvOptions.tShift);
BindingShiftMapper sShiftMapper(spirvOptions.sShift);
BindingShiftMapper uShiftMapper(spirvOptions.uShift);
// Process m2
for (const auto &var : resourceVars)
if (!var.isCounter() && !var.getBinding())
if (const auto *reg = var.getRegister()) {
// Skip space-only register() annotations
if (reg->isSpaceOnly())
continue;
const uint32_t set = reg->RegisterSpace.getValueOr(defaultSpace);
uint32_t binding = reg->RegisterNumber;
switch (reg->RegisterType) {
case 'b':
binding += bShiftMapper.getShiftForSet(set);
break;
case 't':
binding += tShiftMapper.getShiftForSet(set);
break;
case 's':
// For combined texture and sampler resources, always use the t shift
// value and ignore the s shift value.
if (const auto *decl = var.getDeclaration()) {
if (decl->getAttr<VKCombinedImageSamplerAttr>() != nullptr) {
binding += tShiftMapper.getShiftForSet(set);
break;
}
}
binding += sShiftMapper.getShiftForSet(set);
break;
case 'u':
binding += uShiftMapper.getShiftForSet(set);
break;
case 'c':
// For setting packing offset. Does not affect binding.
break;
default:
llvm_unreachable("unknown register type found");
}
tryToDecorate(var, set, binding);
}
for (const auto &var : resourceVars) {
    // By default we use one binding number per resource, and an array of
    // resources also gets only one binding number. However, for arrays of
    // resources (e.g. an array of textures), DX uses one binding number per
    // array element. We can match this behavior via a command line option.
uint32_t numBindingsToUse = 1;
if (spirvOptions.flattenResourceArrays || needsFlatteningCompositeResources)
numBindingsToUse = getNumBindingsUsedByResourceType(
var.getSpirvInstr()->getAstResultType());
BindingShiftMapper *bindingShiftMapper = nullptr;
if (spirvOptions.autoShiftBindings) {
char registerType = '\0';
if (getImplicitRegisterType(var, ®isterType)) {
switch (registerType) {
case 'b':
bindingShiftMapper = &bShiftMapper;
break;
case 't':
bindingShiftMapper = &tShiftMapper;
break;
case 's':
bindingShiftMapper = &sShiftMapper;
break;
case 'u':
bindingShiftMapper = &uShiftMapper;
break;
default:
llvm_unreachable("unknown register type found");
}
}
}
if (var.getDeclaration()) {
const VarDecl *decl = dyn_cast<VarDecl>(var.getDeclaration());
if (decl && (isResourceDescriptorHeap(decl->getType()) ||
isSamplerDescriptorHeap(decl->getType())))
continue;
}
if (var.isCounter()) {
if (!var.getCounterBinding()) {
// Process mX * c2
uint32_t set = defaultSpace;
if (const auto *vkBinding = var.getBinding())
set = getVkBindingAttrSet(vkBinding, defaultSpace);
else if (const auto *reg = var.getRegister())
set = reg->RegisterSpace.getValueOr(defaultSpace);
uint32_t bindingShift = 0;
if (bindingShiftMapper)
bindingShift = bindingShiftMapper->getShiftForSet(set);
spvBuilder.decorateDSetBinding(
var.getSpirvInstr(), set,
bindingSet.useNextBinding(set, numBindingsToUse, bindingShift));
}
} else if (!var.getBinding()) {
const auto *reg = var.getRegister();
if (reg && reg->isSpaceOnly()) {
const uint32_t set = reg->RegisterSpace.getValueOr(defaultSpace);
uint32_t bindingShift = 0;
if (bindingShiftMapper)
bindingShift = bindingShiftMapper->getShiftForSet(set);
spvBuilder.decorateDSetBinding(
var.getSpirvInstr(), set,
bindingSet.useNextBinding(set, numBindingsToUse, bindingShift));
} else if (!reg) {
// Process m3 (no 'vk::binding' and no ':register' assignment)
// There is a special case for the $Globals cbuffer. The $Globals buffer
// doesn't have either 'vk::binding' or ':register', but the user may
// ask for a specific binding for it via command line options.
if (bindGlobals && var.isGlobalsBuffer()) {
uint32_t bindingShift = 0;
if (bindingShiftMapper)
bindingShift = bindingShiftMapper->getShiftForSet(globalsSetNo);
spvBuilder.decorateDSetBinding(var.getSpirvInstr(), globalsSetNo,
globalsBindNo + bindingShift);
}
// The normal case
else {
uint32_t bindingShift = 0;
if (bindingShiftMapper)
bindingShift = bindingShiftMapper->getShiftForSet(defaultSpace);
spvBuilder.decorateDSetBinding(
var.getSpirvInstr(), defaultSpace,
bindingSet.useNextBinding(defaultSpace, numBindingsToUse,
bindingShift));
}
}
}
}
decorateResourceHeapsBindings(bindingSet);
return true;
}
void DeclResultIdMapper::decorateResourceHeapsBindings(BindingSet &bindingSet) {
bool hasResource = false;
bool hasSamplers = false;
bool hasCounters = false;
  // Determine which types of heap resources are used, so that bindings can be
  // allocated lazily.
for (const auto &var : resourceVars) {
if (!var.getDeclaration())
continue;
const VarDecl *decl = dyn_cast<VarDecl>(var.getDeclaration());
if (!decl)
continue;
const bool isResourceHeap = isResourceDescriptorHeap(decl->getType());
const bool isSamplerHeap = isSamplerDescriptorHeap(decl->getType());
assert(!(var.isCounter() && isSamplerHeap));
hasResource |= isResourceHeap;
hasSamplers |= isSamplerHeap;
hasCounters |= isResourceHeap && var.isCounter();
}
// Allocate bindings only for used resources. The order of this allocation is
// important:
// - First resource heaps, then sampler heaps, and finally counter heaps.
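  // For example (illustrative, assuming no earlier bindings in set 0): a
  // shader using ResourceDescriptorHeap, SamplerDescriptorHeap, and counters
  // on heap resources gets bindings 0, 1, and 2 respectively; unused kinds
  // are skipped and consume no binding.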
const uint32_t resourceBinding =
hasResource ? bindingSet.useNextBinding(0) : 0;
const uint32_t samplersBinding =
hasSamplers ? bindingSet.useNextBinding(0) : 0;
const uint32_t countersBinding =
hasCounters ? bindingSet.useNextBinding(0) : 0;
for (const auto &var : resourceVars) {
if (!var.getDeclaration())
continue;
const VarDecl *decl = dyn_cast<VarDecl>(var.getDeclaration());
if (!decl)
continue;
if (isResourceDescriptorHeap(decl->getType()))
spvBuilder.decorateDSetBinding(var.getSpirvInstr(), /* set= */ 0,
var.isCounter() ? countersBinding
: resourceBinding);
else if (isSamplerDescriptorHeap(decl->getType()))
spvBuilder.decorateDSetBinding(var.getSpirvInstr(), /* set= */ 0,
samplersBinding);
}
}
bool DeclResultIdMapper::decorateResourceCoherent() {
for (const auto &var : resourceVars) {
if (const auto *decl = var.getDeclaration()) {
if (decl->getAttr<HLSLGloballyCoherentAttr>()) {
spvBuilder.decorateCoherent(var.getSpirvInstr(),
var.getSourceLocation());
}
}
}
return true;
}
bool DeclResultIdMapper::createStructOutputVar(
const StageVarDataBundle &stageVarData, SpirvInstruction *value,
bool noWriteBack) {
// If we have base classes, we need to handle them first.
if (const auto *cxxDecl = stageVarData.type->getAsCXXRecordDecl()) {
uint32_t baseIndex = 0;
for (auto base : cxxDecl->bases()) {
SpirvInstruction *subValue = nullptr;
if (!noWriteBack)
subValue = spvBuilder.createCompositeExtract(
base.getType(), value, {baseIndex++},
stageVarData.decl->getLocation());
StageVarDataBundle memberVarData = stageVarData;
memberVarData.decl = base.getType()->getAsCXXRecordDecl();
memberVarData.type = base.getType();
if (!createStageVars(memberVarData, false, &subValue, noWriteBack))
return false;
}
}
  // Unlike reading, which may require us to read stand-alone builtins and
  // stage input variables and compose an array of structs out of them, we
  // don't need to write an array of structs all at once for any shader
  // stage:
//
// * VS: output is a single struct, without extra arrayness
// * HS: output is an array of structs, with extra arrayness,
// but we only write to the struct at the InvocationID index
// * DS: output is a single struct, without extra arrayness
  // * GS: output is controlled by OpEmitVertex, one vertex at a time
// * MS: output is an array of structs, with extra arrayness
//
// The interesting shader stage is HS. We need the InvocationID to write
// out the value to the correct array element.
const auto *structDecl = stageVarData.type->getAs<RecordType>()->getDecl();
for (const auto *field : structDecl->fields()) {
const auto fieldType = field->getType();
SpirvInstruction *subValue = nullptr;
if (!noWriteBack) {
subValue = spvBuilder.createCompositeExtract(
fieldType, value,
{getNumBaseClasses(stageVarData.type) + field->getFieldIndex()},
stageVarData.decl->getLocation());
if (field->hasAttr<HLSLNoInterpolationAttr>() ||
structDecl->hasAttr<HLSLNoInterpolationAttr>())
subValue->setNoninterpolated();
}
StageVarDataBundle memberVarData = stageVarData;
memberVarData.decl = field;
memberVarData.type = field->getType();
memberVarData.asNoInterp |= field->hasAttr<HLSLNoInterpolationAttr>();
if (!createStageVars(memberVarData, false, &subValue, noWriteBack))
return false;
}
return true;
}
SpirvInstruction *
DeclResultIdMapper::createStructInputVar(const StageVarDataBundle &stageVarData,
bool noWriteBack) {
// If this decl translates into multiple stage input variables, we need to
// load their values into a composite.
llvm::SmallVector<SpirvInstruction *, 4> subValues;
// If we have base classes, we need to handle them first.
if (const auto *cxxDecl = stageVarData.type->getAsCXXRecordDecl()) {
for (auto base : cxxDecl->bases()) {
SpirvInstruction *subValue = nullptr;
StageVarDataBundle memberVarData = stageVarData;
memberVarData.decl = base.getType()->getAsCXXRecordDecl();
memberVarData.type = base.getType();
if (!createStageVars(memberVarData, true, &subValue, noWriteBack))
return nullptr;
subValues.push_back(subValue);
}
}
const auto *structDecl = stageVarData.type->getAs<RecordType>()->getDecl();
for (const auto *field : structDecl->fields()) {
SpirvInstruction *subValue = nullptr;
StageVarDataBundle memberVarData = stageVarData;
memberVarData.decl = field;
memberVarData.type = field->getType();
memberVarData.asNoInterp |= field->hasAttr<HLSLNoInterpolationAttr>();
if (!createStageVars(memberVarData, true, &subValue, noWriteBack))
return nullptr;
subValues.push_back(subValue);
}
if (stageVarData.arraySize == 0) {
SpirvInstruction *value = spvBuilder.createCompositeConstruct(
stageVarData.type, subValues, stageVarData.decl->getLocation());
for (auto *subInstr : subValues)
spvBuilder.addPerVertexStgInputFuncVarEntry(subInstr, value);
return value;
}
// Handle the extra level of arrayness.
  // We need to return an array of structs. But we get arrays of fields
  // from visiting all fields. So now we need to extract all the elements
  // at the same index of each field array and compose a new struct out
  // of them.
const auto structType = stageVarData.type;
const auto arrayType = astContext.getConstantArrayType(
structType, llvm::APInt(32, stageVarData.arraySize),
clang::ArrayType::Normal, 0);
llvm::SmallVector<SpirvInstruction *, 16> arrayElements;
for (uint32_t arrayIndex = 0; arrayIndex < stageVarData.arraySize;
++arrayIndex) {
llvm::SmallVector<SpirvInstruction *, 8> fields;
// If we have base classes, we need to handle them first.
if (const auto *cxxDecl = stageVarData.type->getAsCXXRecordDecl()) {
uint32_t baseIndex = 0;
for (auto base : cxxDecl->bases()) {
const auto baseType = base.getType();
fields.push_back(spvBuilder.createCompositeExtract(
baseType, subValues[baseIndex++], {arrayIndex},
stageVarData.decl->getLocation()));
}
}
// Extract the element at index arrayIndex from each field
for (const auto *field : structDecl->fields()) {
const auto fieldType = field->getType();
fields.push_back(spvBuilder.createCompositeExtract(
fieldType,
subValues[getNumBaseClasses(stageVarData.type) +
field->getFieldIndex()],
{arrayIndex}, stageVarData.decl->getLocation()));
}
// Compose a new struct out of them
arrayElements.push_back(spvBuilder.createCompositeConstruct(
structType, fields, stageVarData.decl->getLocation()));
}
return spvBuilder.createCompositeConstruct(arrayType, arrayElements,
stageVarData.decl->getLocation());
}
void DeclResultIdMapper::storeToShaderOutputVariable(
SpirvVariable *varInstr, SpirvInstruction *value,
const StageVarDataBundle &stageVarData) {
SpirvInstruction *ptr = varInstr;
// Since boolean output stage variables are represented as unsigned
// integers, we must cast the value to uint before storing.
if (isBooleanStageIOVar(stageVarData.decl, stageVarData.type,
stageVarData.semantic->getKind(),
stageVarData.sigPoint->GetKind())) {
QualType finalType = varInstr->getAstResultType();
if (stageVarData.arraySize != 0) {
// We assume that we will only have to write to a single value of the
// array, so we have to cast to the element type of the array, and not the
// array type.
assert(stageVarData.invocationId.hasValue());
finalType = finalType->getAsArrayTypeUnsafe()->getElementType();
}
value = theEmitter.castToType(value, stageVarData.type, finalType,
stageVarData.decl->getLocation());
}
// Special handling of SV_TessFactor HS patch constant output.
// TessLevelOuter is always an array of size 4 in SPIR-V, but
// SV_TessFactor could be an array of size 2, 3, or 4 in HLSL. Only the
// relevant indexes must be written to.
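  // For example (illustrative): with 'float tf[3] : SV_TessFactor', only
  // TessLevelOuter[0..2] are written; TessLevelOuter[3] is left untouched.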
if (stageVarData.semantic->getKind() == hlsl::Semantic::Kind::TessFactor &&
hlsl::GetArraySize(stageVarData.type) != 4) {
const auto tessFactorSize = hlsl::GetArraySize(stageVarData.type);
for (uint32_t i = 0; i < tessFactorSize; ++i) {
ptr = spvBuilder.createAccessChain(
astContext.FloatTy, varInstr,
{spvBuilder.getConstantInt(astContext.UnsignedIntTy,
llvm::APInt(32, i))},
stageVarData.decl->getLocation());
spvBuilder.createStore(
ptr,
spvBuilder.createCompositeExtract(astContext.FloatTy, value, {i},
stageVarData.decl->getLocation()),
stageVarData.decl->getLocation());
}
}
// Special handling of SV_InsideTessFactor HS patch constant output.
// TessLevelInner is always an array of size 2 in SPIR-V, but
// SV_InsideTessFactor could be an array of size 1 (scalar) or size 2 in
// HLSL. If SV_InsideTessFactor is a scalar, only write to index 0 of
// TessLevelInner.
else if (stageVarData.semantic->getKind() ==
hlsl::Semantic::Kind::InsideTessFactor &&
// Some developers use float[1] instead of a scalar float.
(!stageVarData.type->isArrayType() ||
hlsl::GetArraySize(stageVarData.type) == 1)) {
ptr = spvBuilder.createAccessChain(
astContext.FloatTy, varInstr,
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 0)),
stageVarData.decl->getLocation());
if (stageVarData.type->isArrayType()) // float[1]
value = spvBuilder.createCompositeExtract(
astContext.FloatTy, value, {0}, stageVarData.decl->getLocation());
spvBuilder.createStore(ptr, value, stageVarData.decl->getLocation());
}
  // Special handling of SV_Coverage, which is a uint value. We need to
// write it to the first element in the SampleMask builtin.
else if (stageVarData.semantic->getKind() == hlsl::Semantic::Kind::Coverage) {
ptr = spvBuilder.createAccessChain(
stageVarData.type, varInstr,
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 0)),
stageVarData.decl->getLocation());
ptr->setStorageClass(spv::StorageClass::Output);
spvBuilder.createStore(ptr, value, stageVarData.decl->getLocation());
}
  // Special handling of HS output, for which we write to only one
// element in the per-vertex data array: the one indexed by
// SV_ControlPointID.
else if (stageVarData.invocationId.hasValue() &&
stageVarData.invocationId.getValue() != nullptr) {
// Remove the arrayness to get the element type.
assert(isa<ConstantArrayType>(varInstr->getAstResultType()));
const auto elementType =
astContext.getAsArrayType(varInstr->getAstResultType())
->getElementType();
auto index = stageVarData.invocationId.getValue();
ptr = spvBuilder.createAccessChain(elementType, varInstr, index,
stageVarData.decl->getLocation());
ptr->setStorageClass(spv::StorageClass::Output);
spvBuilder.createStore(ptr, value, stageVarData.decl->getLocation());
}
// For all normal cases
else {
spvBuilder.createStore(ptr, value, stageVarData.decl->getLocation());
}
}
SpirvInstruction *DeclResultIdMapper::loadShaderInputVariable(
SpirvVariable *varInstr, const StageVarDataBundle &stageVarData) {
SpirvInstruction *load = spvBuilder.createLoad(
varInstr->getAstResultType(), varInstr, stageVarData.decl->getLocation());
  // Fix-ups for corner cases
// Special handling of SV_TessFactor DS patch constant input.
// TessLevelOuter is always an array of size 4 in SPIR-V, but
// SV_TessFactor could be an array of size 2, 3, or 4 in HLSL. Only the
// relevant indexes must be loaded.
if (stageVarData.semantic->getKind() == hlsl::Semantic::Kind::TessFactor &&
hlsl::GetArraySize(stageVarData.type) != 4) {
llvm::SmallVector<SpirvInstruction *, 4> components;
const auto tessFactorSize = hlsl::GetArraySize(stageVarData.type);
const auto arrType = astContext.getConstantArrayType(
astContext.FloatTy, llvm::APInt(32, tessFactorSize),
clang::ArrayType::Normal, 0);
for (uint32_t i = 0; i < tessFactorSize; ++i)
components.push_back(spvBuilder.createCompositeExtract(
astContext.FloatTy, load, {i}, stageVarData.decl->getLocation()));
load = spvBuilder.createCompositeConstruct(
arrType, components, stageVarData.decl->getLocation());
}
// Special handling of SV_InsideTessFactor DS patch constant input.
// TessLevelInner is always an array of size 2 in SPIR-V, but
// SV_InsideTessFactor could be an array of size 1 (scalar) or size 2 in
// HLSL. If SV_InsideTessFactor is a scalar, only extract index 0 of
// TessLevelInner.
else if (stageVarData.semantic->getKind() ==
hlsl::Semantic::Kind::InsideTessFactor &&
// Some developers use float[1] instead of a scalar float.
(!stageVarData.type->isArrayType() ||
hlsl::GetArraySize(stageVarData.type) == 1)) {
load = spvBuilder.createCompositeExtract(astContext.FloatTy, load, {0},
stageVarData.decl->getLocation());
if (stageVarData.type->isArrayType()) { // float[1]
const auto arrType = astContext.getConstantArrayType(
astContext.FloatTy, llvm::APInt(32, 1), clang::ArrayType::Normal, 0);
load = spvBuilder.createCompositeConstruct(
arrType, {load}, stageVarData.decl->getLocation());
}
}
// SV_DomainLocation can refer to a float2 or a float3, whereas TessCoord
// is always a float3. To ensure SPIR-V validity, a float3 stage variable
// is created, and we must extract a float2 from it before passing it to
// the main function.
else if (stageVarData.semantic->getKind() ==
hlsl::Semantic::Kind::DomainLocation &&
hlsl::GetHLSLVecSize(stageVarData.type) != 3) {
const auto domainLocSize = hlsl::GetHLSLVecSize(stageVarData.type);
load = spvBuilder.createVectorShuffle(
astContext.getExtVectorType(astContext.FloatTy, domainLocSize), load,
load, {0, 1}, stageVarData.decl->getLocation());
}
  // Special handling of SV_Coverage, which is a uint value. We need to
// read SampleMask and extract its first element.
else if (stageVarData.semantic->getKind() == hlsl::Semantic::Kind::Coverage) {
load = spvBuilder.createCompositeExtract(stageVarData.type, load, {0},
stageVarData.decl->getLocation());
}
  // Special handling of SV_InnerCoverage, which is a uint value. We need
  // to read FullyCoveredEXT, which is a boolean value, and convert it to a
  // uint value. According to the D3D12 "Conservative Rasterization" doc: "The
  // Pixel Shader has a 32-bit scalar integer System Generated Value
// available: InnerCoverage. This is a bit-field that has bit 0 from the
// LSB set to 1 for a given conservatively rasterized pixel, only when
// that pixel is guaranteed to be entirely inside the current primitive.
// All other input register bits must be set to 0 when bit 0 is not set,
// but are undefined when bit 0 is set to 1 (essentially, this bit-field
// represents a Boolean value where false must be exactly 0, but true can
// be any odd (i.e. bit 0 set) non-zero value)."
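  // A minimal sketch of the resulting SPIR-V (assumed shape, not from a
  // test):
  //   %fc  = OpLoad %bool %gl_FullyCoveredEXT
  //   %val = OpSelect %uint %fc %uint_1 %uint_0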
else if (stageVarData.semantic->getKind() ==
hlsl::Semantic::Kind::InnerCoverage) {
const auto constOne =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 1));
const auto constZero =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 0));
load = spvBuilder.createSelect(astContext.UnsignedIntTy, load, constOne,
constZero, stageVarData.decl->getLocation());
}
  // Special handling of SV_Barycentrics, which is a float3, but the
  // 3 values are NOT guaranteed to add up to floating-point 1.0
  // exactly. Calculate the third element here.
else if (stageVarData.semantic->getKind() ==
hlsl::Semantic::Kind::Barycentrics) {
const auto x = spvBuilder.createCompositeExtract(
astContext.FloatTy, load, {0}, stageVarData.decl->getLocation());
const auto y = spvBuilder.createCompositeExtract(
astContext.FloatTy, load, {1}, stageVarData.decl->getLocation());
const auto xy =
spvBuilder.createBinaryOp(spv::Op::OpFAdd, astContext.FloatTy, x, y,
stageVarData.decl->getLocation());
const auto z = spvBuilder.createBinaryOp(
spv::Op::OpFSub, astContext.FloatTy,
spvBuilder.getConstantFloat(astContext.FloatTy, llvm::APFloat(1.0f)),
xy, stageVarData.decl->getLocation());
load = spvBuilder.createCompositeConstruct(
astContext.getExtVectorType(astContext.FloatTy, 3), {x, y, z},
stageVarData.decl->getLocation());
}
  // Special handling of SV_DispatchThreadID and SV_GroupThreadID, which may
  // be a uint or uint2, but the underlying stage input variable is a uint3.
  // The last component(s) should be discarded if needed.
else if ((stageVarData.semantic->getKind() ==
hlsl::Semantic::Kind::DispatchThreadID ||
stageVarData.semantic->getKind() ==
hlsl::Semantic::Kind::GroupThreadID ||
stageVarData.semantic->getKind() ==
hlsl::Semantic::Kind::GroupID) &&
(!hlsl::IsHLSLVecType(stageVarData.type) ||
hlsl::GetHLSLVecSize(stageVarData.type) != 3)) {
const auto srcVecElemType =
hlsl::IsHLSLVecType(stageVarData.type)
? hlsl::GetHLSLVecElementType(stageVarData.type)
: stageVarData.type;
const auto vecSize = hlsl::IsHLSLVecType(stageVarData.type)
? hlsl::GetHLSLVecSize(stageVarData.type)
: 1;
if (vecSize == 1)
load = spvBuilder.createCompositeExtract(
srcVecElemType, load, {0}, stageVarData.decl->getLocation());
else if (vecSize == 2)
load = spvBuilder.createVectorShuffle(
astContext.getExtVectorType(srcVecElemType, 2), load, load, {0, 1},
stageVarData.decl->getLocation());
}
// Reciprocate SV_Position.w if requested
if (stageVarData.semantic->getKind() == hlsl::Semantic::Kind::Position)
load = invertWIfRequested(load, stageVarData.decl->getLocation());
// Since boolean stage input variables are represented as unsigned
// integers, after loading them, we should cast them to boolean.
if (isBooleanStageIOVar(stageVarData.decl, stageVarData.type,
stageVarData.semantic->getKind(),
stageVarData.sigPoint->GetKind())) {
if (stageVarData.arraySize == 0) {
load = theEmitter.castToType(load, varInstr->getAstResultType(),
stageVarData.type,
stageVarData.decl->getLocation());
} else {
llvm::SmallVector<SpirvInstruction *, 8> fields;
SourceLocation loc = stageVarData.decl->getLocation();
QualType originalScalarType = varInstr->getAstResultType()
->castAsArrayTypeUnsafe()
->getElementType();
for (uint32_t idx = 0; idx < stageVarData.arraySize; ++idx) {
SpirvInstruction *field = spvBuilder.createCompositeExtract(
originalScalarType, load, {idx}, loc);
field = theEmitter.castToType(field, field->getAstResultType(),
stageVarData.type, loc);
fields.push_back(field);
}
QualType finalType = astContext.getConstantArrayType(
stageVarData.type, llvm::APInt(32, stageVarData.arraySize),
clang::ArrayType::Normal, 0);
load = spvBuilder.createCompositeConstruct(finalType, fields, loc);
}
}
return load;
}
bool DeclResultIdMapper::validateShaderStageVar(
const StageVarDataBundle &stageVarData) {
if (!validateVKAttributes(stageVarData.decl))
return false;
if (!isValidSemanticInShaderModel(stageVarData)) {
emitError("invalid usage of semantic '%0' in shader profile %1",
stageVarData.decl->getLocation())
<< stageVarData.semantic->str
<< hlsl::ShaderModel::GetKindName(
spvContext.getCurrentShaderModelKind());
return false;
}
if (!validateVKBuiltins(stageVarData))
return false;
if (!validateShaderStageVarType(stageVarData))
return false;
return true;
}
bool DeclResultIdMapper::validateVKAttributes(const NamedDecl *decl) {
bool success = true;
if (const auto *idxAttr = decl->getAttr<VKIndexAttr>()) {
if (!spvContext.isPS()) {
emitError("vk::index only allowed in pixel shader",
idxAttr->getLocation());
success = false;
}
const auto *locAttr = decl->getAttr<VKLocationAttr>();
if (!locAttr) {
emitError("vk::index should be used together with vk::location for "
"dual-source blending",
idxAttr->getLocation());
success = false;
} else {
const auto locNumber = locAttr->getNumber();
if (locNumber != 0) {
emitError("dual-source blending should use vk::location 0",
locAttr->getLocation());
success = false;
}
}
const auto idxNumber = idxAttr->getNumber();
if (idxNumber != 0 && idxNumber != 1) {
emitError("dual-source blending only accepts 0 or 1 as vk::index",
idxAttr->getLocation());
success = false;
}
}
return success;
}
bool DeclResultIdMapper::validateVKBuiltins(
const StageVarDataBundle &stageVarData) {
bool success = true;
if (const auto *builtinAttr = stageVarData.decl->getAttr<VKBuiltInAttr>()) {
// The front end parsing only allows vk::builtin to be attached to a
// function/parameter/variable; all of them are DeclaratorDecls.
const auto declType =
getTypeOrFnRetType(cast<DeclaratorDecl>(stageVarData.decl));
const auto loc = builtinAttr->getLocation();
if (stageVarData.decl->hasAttr<VKLocationAttr>()) {
emitError("cannot use vk::builtin and vk::location together", loc);
success = false;
}
const llvm::StringRef builtin = builtinAttr->getBuiltIn();
if (builtin == "HelperInvocation") {
if (!declType->isBooleanType()) {
emitError("HelperInvocation builtin must be of boolean type", loc);
success = false;
}
if (stageVarData.sigPoint->GetKind() != hlsl::SigPoint::Kind::PSIn) {
emitError(
"HelperInvocation builtin can only be used as pixel shader input",
loc);
success = false;
}
} else if (builtin == "PointSize") {
if (!declType->isFloatingType()) {
emitError("PointSize builtin must be of float type", loc);
success = false;
}
switch (stageVarData.sigPoint->GetKind()) {
case hlsl::SigPoint::Kind::VSOut:
case hlsl::SigPoint::Kind::HSCPIn:
case hlsl::SigPoint::Kind::HSCPOut:
case hlsl::SigPoint::Kind::DSCPIn:
case hlsl::SigPoint::Kind::DSOut:
case hlsl::SigPoint::Kind::GSVIn:
case hlsl::SigPoint::Kind::GSOut:
case hlsl::SigPoint::Kind::PSIn:
case hlsl::SigPoint::Kind::MSOut:
break;
default:
emitError("PointSize builtin cannot be used as %0", loc)
<< stageVarData.sigPoint->GetName();
success = false;
}
} else if (builtin == "BaseVertex" || builtin == "BaseInstance" ||
builtin == "DrawIndex") {
if (!declType->isSpecificBuiltinType(BuiltinType::Kind::Int) &&
!declType->isSpecificBuiltinType(BuiltinType::Kind::UInt)) {
emitError("%0 builtin must be of 32-bit scalar integer type", loc)
<< builtin;
success = false;
}
switch (stageVarData.sigPoint->GetKind()) {
case hlsl::SigPoint::Kind::VSIn:
break;
case hlsl::SigPoint::Kind::MSIn:
case hlsl::SigPoint::Kind::ASIn:
if (builtin != "DrawIndex") {
emitError("%0 builtin cannot be used as %1", loc)
<< builtin << stageVarData.sigPoint->GetName();
success = false;
}
break;
default:
emitError("%0 builtin cannot be used as %1", loc)
<< builtin << stageVarData.sigPoint->GetName();
success = false;
}
} else if (builtin == "DeviceIndex") {
if (getStorageClassForSigPoint(stageVarData.sigPoint) !=
spv::StorageClass::Input) {
emitError("%0 builtin can only be used as shader input", loc)
<< builtin;
success = false;
}
if (!declType->isSpecificBuiltinType(BuiltinType::Kind::Int) &&
!declType->isSpecificBuiltinType(BuiltinType::Kind::UInt)) {
emitError("%0 builtin must be of 32-bit scalar integer type", loc)
<< builtin;
success = false;
}
} else if (builtin == "ViewportMaskNV") {
if (stageVarData.sigPoint->GetKind() != hlsl::SigPoint::Kind::MSPOut) {
emitError("%0 builtin can only be used as 'primitives' output in MS",
loc)
<< builtin;
success = false;
}
if (!declType->isArrayType() ||
!declType->getArrayElementTypeNoTypeQual()->isSpecificBuiltinType(
BuiltinType::Kind::Int)) {
emitError("%0 builtin must be of type array of integers", loc)
<< builtin;
success = false;
}
}
}
return success;
}
bool DeclResultIdMapper::validateShaderStageVarType(
const StageVarDataBundle &stageVarData) {
switch (stageVarData.semantic->getKind()) {
case hlsl::Semantic::Kind::InnerCoverage:
if (!stageVarData.type->isSpecificBuiltinType(BuiltinType::UInt)) {
emitError("SV_InnerCoverage must be of uint type.",
stageVarData.decl->getLocation());
return false;
}
break;
default:
break;
}
return true;
}
bool DeclResultIdMapper::isValidSemanticInShaderModel(
const StageVarDataBundle &stageVarData) {
// Error out when the given semantic is invalid in this shader model
if (hlsl::SigPoint::GetInterpretation(
stageVarData.semantic->getKind(), stageVarData.sigPoint->GetKind(),
spvContext.getMajorVersion(), spvContext.getMinorVersion()) ==
hlsl::DXIL::SemanticInterpretationKind::NA) {
// Special handle MSIn/ASIn allowing VK-only builtin "DrawIndex".
switch (stageVarData.sigPoint->GetKind()) {
case hlsl::SigPoint::Kind::MSIn:
case hlsl::SigPoint::Kind::ASIn:
if (const auto *builtinAttr =
stageVarData.decl->getAttr<VKBuiltInAttr>()) {
const llvm::StringRef builtin = builtinAttr->getBuiltIn();
if (builtin == "DrawIndex") {
break;
}
}
LLVM_FALLTHROUGH;
default:
return false;
}
}
return true;
}
SpirvVariable *DeclResultIdMapper::getInstanceIdFromIndexAndBase(
SpirvVariable *instanceIndexVar, SpirvVariable *baseInstanceVar) {
QualType type = instanceIndexVar->getAstResultType();
auto *instanceIdVar = spvBuilder.addFnVar(
type, instanceIndexVar->getSourceLocation(), "SV_InstanceID");
auto *instanceIndexValue = spvBuilder.createLoad(
type, instanceIndexVar, instanceIndexVar->getSourceLocation());
auto *baseInstanceValue = spvBuilder.createLoad(
type, baseInstanceVar, instanceIndexVar->getSourceLocation());
auto *instanceIdValue = spvBuilder.createBinaryOp(
spv::Op::OpISub, type, instanceIndexValue, baseInstanceValue,
instanceIndexVar->getSourceLocation());
spvBuilder.createStore(instanceIdVar, instanceIdValue,
instanceIndexVar->getSourceLocation());
return instanceIdVar;
}
SpirvVariable *
DeclResultIdMapper::getVertexIdFromIndexAndBase(SpirvVariable *vertexIndexVar,
SpirvVariable *baseVertexVar) {
QualType type = vertexIndexVar->getAstResultType();
auto *vertexIdVar = spvBuilder.addFnVar(
type, vertexIndexVar->getSourceLocation(), "SV_VertexID");
auto *vertexIndexValue = spvBuilder.createLoad(
type, vertexIndexVar, vertexIndexVar->getSourceLocation());
auto *baseVertexValue = spvBuilder.createLoad(
type, baseVertexVar, vertexIndexVar->getSourceLocation());
auto *vertexIdValue = spvBuilder.createBinaryOp(
spv::Op::OpISub, type, vertexIndexValue, baseVertexValue,
vertexIndexVar->getSourceLocation());
spvBuilder.createStore(vertexIdVar, vertexIdValue,
vertexIndexVar->getSourceLocation());
return vertexIdVar;
}
SpirvVariable *
DeclResultIdMapper::getBaseInstanceVariable(const hlsl::SigPoint *sigPoint,
QualType type) {
assert(type->isSpecificBuiltinType(BuiltinType::Kind::Int) ||
type->isSpecificBuiltinType(BuiltinType::Kind::UInt));
auto *baseInstanceVar = spvBuilder.addStageBuiltinVar(
type, spv::StorageClass::Input, spv::BuiltIn::BaseInstance, false, {});
StageVar var(sigPoint, {}, nullptr, type,
getLocationAndComponentCount(astContext, type));
var.setSpirvInstr(baseInstanceVar);
var.setIsSpirvBuiltin();
stageVars.push_back(var);
return baseInstanceVar;
}
SpirvVariable *
DeclResultIdMapper::getBaseVertexVariable(const hlsl::SigPoint *sigPoint,
QualType type) {
assert(type->isSpecificBuiltinType(BuiltinType::Kind::Int) ||
type->isSpecificBuiltinType(BuiltinType::Kind::UInt));
auto *baseVertexVar = spvBuilder.addStageBuiltinVar(
type, spv::StorageClass::Input, spv::BuiltIn::BaseVertex, false, {});
StageVar var(sigPoint, {}, nullptr, type,
getLocationAndComponentCount(astContext, type));
var.setSpirvInstr(baseVertexVar);
var.setIsSpirvBuiltin();
stageVars.push_back(var);
return baseVertexVar;
}
SpirvVariable *DeclResultIdMapper::createSpirvInterfaceVariable(
const StageVarDataBundle &stageVarData) {
// The evalType will be the type of the interface variable in SPIR-V.
// The type of the variable used in the body of the function will still be
// `stageVarData.type`.
QualType evalType = getTypeForSpirvStageVariable(stageVarData);
const auto *builtinAttr = stageVarData.decl->getAttr<VKBuiltInAttr>();
StageVar stageVar(
stageVarData.sigPoint, *stageVarData.semantic, builtinAttr, evalType,
      // For HS/DS/GS, we have already stripped the outermost arrayness on type.
getLocationAndComponentCount(astContext, stageVarData.type));
const auto name =
stageVarData.namePrefix.str() + "." + stageVar.getSemanticStr();
SpirvVariable *varInstr = createSpirvStageVar(
&stageVar, stageVarData.decl, name, stageVarData.semantic->loc);
if (!varInstr)
return nullptr;
if (stageVarData.asNoInterp)
varInstr->setNoninterpolated();
stageVar.setSpirvInstr(varInstr);
stageVar.setLocationAttr(stageVarData.decl->getAttr<VKLocationAttr>());
stageVar.setIndexAttr(stageVarData.decl->getAttr<VKIndexAttr>());
if (stageVar.getStorageClass() == spv::StorageClass::Input ||
stageVar.getStorageClass() == spv::StorageClass::Output) {
stageVar.setEntryPoint(entryFunction);
}
decorateStageVarWithIntrinsicAttrs(stageVarData.decl, &stageVar, varInstr);
stageVars.push_back(stageVar);
// Emit OpDecorate* instructions to link this stage variable with the HLSL
// semantic it is created for
spvBuilder.decorateHlslSemantic(varInstr, stageVar.getSemanticStr());
// TODO: the following may not be correct?
if (stageVarData.sigPoint->GetSignatureKind() ==
hlsl::DXIL::SignatureKind::PatchConstOrPrim) {
if (stageVarData.sigPoint->GetKind() == hlsl::SigPoint::Kind::MSPOut) {
// Decorate with PerPrimitiveNV for per-primitive out variables.
spvBuilder.decoratePerPrimitiveNV(varInstr,
varInstr->getSourceLocation());
} else {
spvBuilder.decoratePatch(varInstr, varInstr->getSourceLocation());
}
}
// Decorate with interpolation modes for pixel shader input variables, vertex
// shader output variables, or mesh shader output variables.
if ((spvContext.isPS() && stageVarData.sigPoint->IsInput()) ||
(spvContext.isVS() && stageVarData.sigPoint->IsOutput()) ||
(spvContext.isMS() && stageVarData.sigPoint->IsOutput()))
decorateInterpolationMode(stageVarData.decl, stageVarData.type, varInstr,
*stageVarData.semantic);
// Special case: The DX12 SV_InstanceID always counts from 0, even if the
// StartInstanceLocation parameter is non-zero. gl_InstanceIndex, however,
// starts from firstInstance. Thus it doesn't emulate actual DX12 shader
// behavior. To make it equivalent, SPIR-V codegen should emit:
// SV_InstanceID = gl_InstanceIndex - gl_BaseInstance
// As a result, we have to manually create a second stage variable for this
// specific case.
//
// According to the Vulkan spec on builtin variables:
// www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html#interfaces-builtin-variables
//
// InstanceIndex:
// Decorating a variable in a vertex shader with the InstanceIndex
// built-in decoration will make that variable contain the index of the
// instance that is being processed by the current vertex shader
// invocation. InstanceIndex begins at the firstInstance.
// BaseInstance
// Decorating a variable with the BaseInstance built-in will make that
// variable contain the integer value corresponding to the first instance
// that was passed to the command that invoked the current vertex shader
// invocation. BaseInstance is the firstInstance parameter to a direct
// drawing command or the firstInstance member of a structure consumed by
// an indirect drawing command.
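  //
  // As an illustration, the subtraction below is emitted roughly as the
  // following SPIR-V (a sketch; result names are hypothetical):
  //   %index = OpLoad %int %gl_InstanceIndex
  //   %base  = OpLoad %int %gl_BaseInstance
  //   %id    = OpISub %int %index %base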
if (spirvOptions.supportNonzeroBaseInstance &&
stageVarData.semantic->getKind() == hlsl::Semantic::Kind::InstanceID &&
stageVarData.sigPoint->GetKind() == hlsl::SigPoint::Kind::VSIn) {
// The above call to createSpirvStageVar creates the gl_InstanceIndex.
// We should now manually create the gl_BaseInstance variable and do the
// subtraction.
auto *baseInstanceVar =
getBaseInstanceVariable(stageVarData.sigPoint, stageVarData.type);
// SPIR-V code for 'SV_InstanceID = gl_InstanceIndex - gl_BaseInstance'
varInstr = getInstanceIdFromIndexAndBase(varInstr, baseInstanceVar);
}
if (spirvOptions.supportNonzeroBaseVertex &&
stageVarData.semantic->getKind() == hlsl::Semantic::Kind::VertexID &&
stageVarData.sigPoint->GetKind() == hlsl::SigPoint::Kind::VSIn) {
auto *baseVertexVar =
getBaseVertexVariable(stageVarData.sigPoint, stageVarData.type);
// SPIR-V code for 'SV_VertexID = gl_VertexIndex - gl_BaseVertex'
varInstr = getVertexIdFromIndexAndBase(varInstr, baseVertexVar);
}
// We have semantics attached to this decl, which means it must be a
// function/parameter/variable. All are DeclaratorDecls.
stageVarInstructions[cast<DeclaratorDecl>(stageVarData.decl)] = varInstr;
return varInstr;
}
QualType DeclResultIdMapper::getTypeForSpirvStageVariable(
const StageVarDataBundle &stageVarData) {
QualType evalType = stageVarData.type;
switch (stageVarData.semantic->getKind()) {
case hlsl::Semantic::Kind::DomainLocation:
// SV_DomainLocation can refer to a float2, whereas TessCoord is a float3.
// To ensure SPIR-V validity, we must create a float3 and extract a
// float2 from it before passing it to the main function.
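    // E.g., for an HLSL parameter 'float2 uv : SV_DomainLocation', the
    // interface variable is a float3 TessCoord, and loadShaderInputVariable()
    // extracts components {0, 1} with a vector shuffle to produce the float2
    // seen by the entry function (an illustrative note).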
evalType = astContext.getExtVectorType(astContext.FloatTy, 3);
break;
case hlsl::Semantic::Kind::TessFactor:
// SV_TessFactor is an array of size 2 for isoline patch, array of size 3
// for tri patch, and array of size 4 for quad patch, but it must always
// be an array of size 4 in SPIR-V for Vulkan.
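    // E.g., a tri-patch 'float factors[3] : SV_TessFactor' is backed by a
    // float[4] TessLevelOuter variable; conceptually only the first three
    // elements carry meaningful values (an illustrative note).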
evalType = astContext.getConstantArrayType(
astContext.FloatTy, llvm::APInt(32, 4), clang::ArrayType::Normal, 0);
break;
case hlsl::Semantic::Kind::InsideTessFactor:
// SV_InsideTessFactor is a single float for tri patch, and an array of
// size 2 for a quad patch, but it must always be an array of size 2 in
// SPIR-V for Vulkan.
evalType = astContext.getConstantArrayType(
astContext.FloatTy, llvm::APInt(32, 2), clang::ArrayType::Normal, 0);
break;
case hlsl::Semantic::Kind::Coverage:
    // SV_Coverage is a uint value, but the SPIR-V builtin it corresponds to,
    // SampleMask, must be an array of integers.
evalType = astContext.getConstantArrayType(astContext.UnsignedIntTy,
llvm::APInt(32, 1),
clang::ArrayType::Normal, 0);
break;
case hlsl::Semantic::Kind::InnerCoverage:
    // SV_InnerCoverage is a uint value, but the corresponding SPIR-V builtin,
    // FullyCoveredEXT, must be a boolean value.
evalType = astContext.BoolTy;
break;
case hlsl::Semantic::Kind::Barycentrics:
evalType = astContext.getExtVectorType(astContext.FloatTy, 3);
break;
case hlsl::Semantic::Kind::DispatchThreadID:
case hlsl::Semantic::Kind::GroupThreadID:
case hlsl::Semantic::Kind::GroupID:
// SV_DispatchThreadID, SV_GroupThreadID, and SV_GroupID are allowed to be
// uint, uint2, or uint3, but the corresponding SPIR-V builtins
// (GlobalInvocationId, LocalInvocationId, WorkgroupId) must be a uint3.
// Keep the original integer signedness
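    // E.g., a 'uint id : SV_DispatchThreadID' parameter is backed by a uint3
    // GlobalInvocationId variable; the extra components are dropped when the
    // value is loaded (see the extract/shuffle logic in
    // loadShaderInputVariable()).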
evalType = astContext.getExtVectorType(
hlsl::IsHLSLVecType(stageVarData.type)
? hlsl::GetHLSLVecElementType(stageVarData.type)
: stageVarData.type,
3);
break;
default:
// Other semantic kinds can keep the original type.
break;
}
// Boolean stage I/O variables must be represented as unsigned integers.
// Boolean built-in variables are represented as bool.
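  // (Vulkan disallows boolean types on Input/Output interfaces, so e.g. a
  // 'bool b : B' stage input becomes a uint interface variable that is cast
  // back to bool after loading; see loadShaderInputVariable().)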
if (isBooleanStageIOVar(stageVarData.decl, stageVarData.type,
stageVarData.semantic->getKind(),
stageVarData.sigPoint->GetKind())) {
evalType = getUintTypeWithSourceComponents(astContext, stageVarData.type);
}
// Handle the extra arrayness
if (stageVarData.arraySize != 0) {
evalType = astContext.getConstantArrayType(
evalType, llvm::APInt(32, stageVarData.arraySize),
clang::ArrayType::Normal, 0);
}
return evalType;
}
bool DeclResultIdMapper::createStageVars(StageVarDataBundle &stageVarData,
bool asInput, SpirvInstruction **value,
bool noWriteBack) {
assert(value);
// invocationId should only be used for handling HS per-vertex output.
if (stageVarData.invocationId.hasValue()) {
assert(spvContext.isHS() && stageVarData.arraySize != 0 && !asInput);
}
assert(stageVarData.semantic);
if (stageVarData.type->isVoidType()) {
// No stage variables will be created for void type.
return true;
}
// We have several cases regarding HLSL semantics to handle here:
// * If the current decl inherits a semantic from some enclosing entity,
// use the inherited semantic no matter whether there is a semantic
// attached to the current decl.
// * If there is no semantic to inherit,
// * If the current decl is a struct,
// * If the current decl has a semantic, all its members inherit this
// decl's semantic, with the index sequentially increasing;
// * If the current decl does not have a semantic, all its members
// should have semantics attached;
// * If the current decl is not a struct, it should have semantic attached.
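  //
  // For example (illustrative HLSL):
  //   struct VSOut { float4 pos : SV_Position; float2 uv : TEXCOORD0; };
  // A bare 'VSOut' return value has no semantic of its own, so each field's
  // semantic is used; had the struct itself carried a semantic, the fields
  // would inherit it with sequentially increasing indices.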
auto thisSemantic = getStageVarSemantic(stageVarData.decl);
// Which semantic we should use for this decl
// Enclosing semantics override internal ones
if (stageVarData.semantic->isValid()) {
if (thisSemantic.isValid()) {
emitWarning(
"internal semantic '%0' overridden by enclosing semantic '%1'",
thisSemantic.loc)
<< thisSemantic.str << stageVarData.semantic->str;
}
} else {
stageVarData.semantic = &thisSemantic;
}
if (stageVarData.semantic->isValid() &&
// Structs with attached semantics will be handled later.
!stageVarData.type->isStructureType()) {
// Found semantic attached directly to this Decl. This means we need to
// map this decl to a single stage variable.
const auto semanticKind = stageVarData.semantic->getKind();
const auto sigPointKind = stageVarData.sigPoint->GetKind();
if (!validateShaderStageVar(stageVarData)) {
return false;
}
// Special handling of certain mappings between HLSL semantics and
// SPIR-V builtins:
// * SV_CullDistance/SV_ClipDistance are outsourced to GlPerVertex.
if (glPerVertex.tryToAccess(
sigPointKind, semanticKind, stageVarData.semantic->index,
stageVarData.invocationId, value, noWriteBack,
/*vecComponent=*/nullptr, stageVarData.decl->getLocation()))
return true;
SpirvVariable *varInstr = createSpirvInterfaceVariable(stageVarData);
if (!varInstr) {
return false;
}
// Mark that we have used one index for this semantic
++stageVarData.semantic->index;
if (asInput) {
*value = loadShaderInputVariable(varInstr, stageVarData);
if ((stageVarData.decl->hasAttr<HLSLNoInterpolationAttr>() ||
stageVarData.asNoInterp) &&
sigPointKind == hlsl::SigPoint::Kind::PSIn)
spvBuilder.addPerVertexStgInputFuncVarEntry(varInstr, *value);
} else {
if (noWriteBack)
return true;
// Negate SV_Position.y if requested
if (semanticKind == hlsl::Semantic::Kind::Position)
*value = theEmitter.invertYIfRequested(*value, thisSemantic.loc);
storeToShaderOutputVariable(varInstr, *value, stageVarData);
}
return true;
}
  // If the decl itself doesn't have a semantic string attached and there is
  // none to inherit, it should be a struct whose fields all carry semantic
  // strings.
if (!stageVarData.semantic->isValid() &&
!stageVarData.type->isStructureType()) {
emitError("semantic string missing for shader %select{output|input}0 "
"variable '%1'",
stageVarData.decl->getLocation())
<< asInput << stageVarData.decl->getName();
return false;
}
if (asInput) {
*value = createStructInputVar(stageVarData, noWriteBack);
return (*value) != nullptr;
} else {
return createStructOutputVar(stageVarData, *value, noWriteBack);
}
}
bool DeclResultIdMapper::createPayloadStageVars(
const hlsl::SigPoint *sigPoint, spv::StorageClass sc, const NamedDecl *decl,
bool asInput, QualType type, const llvm::StringRef namePrefix,
SpirvInstruction **value, uint32_t payloadMemOffset) {
assert(spvContext.isMS() || spvContext.isAS());
assert(value);
if (type->isVoidType()) {
// No stage variables will be created for void type.
return true;
}
const auto loc = decl->getLocation();
  // Most struct-type stage vars must be flattened, but for EXT_mesh_shader the
  // mesh payload struct should be decorated with TaskPayloadWorkgroupEXT and
  // used directly as the OpEntryPoint variable.
if (!type->isStructureType() ||
featureManager.isExtensionEnabled(Extension::EXT_mesh_shader)) {
SpirvVariable *varInstr = nullptr;
// Check whether a mesh payload module variable has already been added, as
// is the case for the groupshared payload variable parameter of
// DispatchMesh. In this case, change the storage class from Workgroup to
// TaskPayloadWorkgroupEXT.
if (featureManager.isExtensionEnabled(Extension::EXT_mesh_shader)) {
for (SpirvVariable *moduleVar : spvBuilder.getModule()->getVariables()) {
if (moduleVar->getAstResultType() == type) {
moduleVar->setStorageClass(
spv::StorageClass::TaskPayloadWorkgroupEXT);
varInstr = moduleVar;
}
}
}
// If necessary, create new stage variable for mesh payload.
if (!varInstr) {
LocationAndComponent locationAndComponentCount =
type->isStructureType()
? LocationAndComponent({0, 0, false})
: getLocationAndComponentCount(astContext, type);
StageVar stageVar(sigPoint, /*semaInfo=*/{}, /*builtinAttr=*/nullptr,
type, locationAndComponentCount);
const auto name = namePrefix.str() + "." + decl->getNameAsString();
varInstr = spvBuilder.addStageIOVar(type, sc, name, /*isPrecise=*/false,
/*isNointerp=*/false, loc);
if (!varInstr)
return false;
      // Even though these are user-defined IO stage variables, set them as
      // SPIR-V builtins in order to bypass semantic string checks and
      // location assignment.
stageVar.setIsSpirvBuiltin();
stageVar.setSpirvInstr(varInstr);
if (stageVar.getStorageClass() == spv::StorageClass::Input ||
stageVar.getStorageClass() == spv::StorageClass::Output) {
stageVar.setEntryPoint(entryFunction);
}
stageVars.push_back(stageVar);
if (!featureManager.isExtensionEnabled(Extension::EXT_mesh_shader)) {
// Decorate with PerTaskNV for mesh/amplification shader payload
// variables.
spvBuilder.decoratePerTaskNV(varInstr, payloadMemOffset,
varInstr->getSourceLocation());
}
}
if (asInput) {
*value = spvBuilder.createLoad(type, varInstr, loc);
} else {
spvBuilder.createStore(varInstr, *value, loc);
}
return true;
}
// This decl translates into multiple stage input/output payload variables
// and we need to load/store these individual member variables.
const auto *structDecl = type->getAs<RecordType>()->getDecl();
llvm::SmallVector<SpirvInstruction *, 4> subValues;
AlignmentSizeCalculator alignmentCalc(astContext, spirvOptions);
uint32_t nextMemberOffset = 0;
for (const auto *field : structDecl->fields()) {
const auto fieldType = field->getType();
SpirvInstruction *subValue = nullptr;
uint32_t memberAlignment = 0, memberSize = 0, stride = 0;
    // The next available offset after laying out the previous members.
std::tie(memberAlignment, memberSize) = alignmentCalc.getAlignmentAndSize(
field->getType(), spirvOptions.ampPayloadLayoutRule,
/*isRowMajor*/ llvm::None, &stride);
alignmentCalc.alignUsingHLSLRelaxedLayout(
field->getType(), memberSize, memberAlignment, &nextMemberOffset);
// The vk::offset attribute takes precedence over all.
if (field->getAttr<VKOffsetAttr>()) {
nextMemberOffset = field->getAttr<VKOffsetAttr>()->getOffset();
}
// Each payload member must have an Offset Decoration.
payloadMemOffset = nextMemberOffset;
nextMemberOffset += memberSize;
if (!asInput) {
subValue = spvBuilder.createCompositeExtract(
fieldType, *value, {getNumBaseClasses(type) + field->getFieldIndex()},
loc);
}
if (!createPayloadStageVars(sigPoint, sc, field, asInput, field->getType(),
namePrefix, &subValue, payloadMemOffset))
return false;
if (asInput) {
subValues.push_back(subValue);
}
}
if (asInput) {
*value = spvBuilder.createCompositeConstruct(type, subValues, loc);
}
return true;
}
bool DeclResultIdMapper::writeBackOutputStream(const NamedDecl *decl,
QualType type,
SpirvInstruction *value,
SourceRange range) {
assert(spvContext.isGS()); // Only for GS use
if (hlsl::IsHLSLStreamOutputType(type))
type = hlsl::GetHLSLResourceResultType(type);
if (hasGSPrimitiveTypeQualifier(decl))
type = astContext.getAsConstantArrayType(type)->getElementType();
auto semanticInfo = getStageVarSemantic(decl);
const auto loc = decl->getLocation();
if (semanticInfo.isValid()) {
// Found semantic attached directly to this Decl. Write the value for this
// Decl to the corresponding stage output variable.
    // Handle SV_ClipDistance and SV_CullDistance
if (glPerVertex.tryToAccess(
hlsl::DXIL::SigPointKind::GSOut, semanticInfo.semantic->GetKind(),
semanticInfo.index, llvm::None, &value,
/*noWriteBack=*/false, /*vecComponent=*/nullptr, loc, range))
return true;
// Query the <result-id> for the stage output variable generated out
// of this decl.
    // We have a semantic string attached to this decl; therefore, it must be a
    // DeclaratorDecl.
const auto found = stageVarInstructions.find(cast<DeclaratorDecl>(decl));
// We should have recorded its stage output variable previously.
assert(found != stageVarInstructions.end());
// Negate SV_Position.y if requested
if (semanticInfo.semantic->GetKind() == hlsl::Semantic::Kind::Position)
value = theEmitter.invertYIfRequested(value, loc, range);
// Boolean stage output variables are represented as unsigned integers.
if (isBooleanStageIOVar(decl, type, semanticInfo.semantic->GetKind(),
hlsl::SigPoint::Kind::GSOut)) {
QualType uintType = getUintTypeWithSourceComponents(astContext, type);
value = theEmitter.castToType(value, type, uintType, loc, range);
}
spvBuilder.createStore(found->second, value, loc, range);
return true;
}
  // If the decl itself doesn't have a semantic string attached, it should be
  // a struct whose fields all carry semantic strings.
if (!type->isStructureType()) {
emitError("semantic string missing for shader output variable '%0'", loc)
<< decl->getName();
return false;
}
// If we have base classes, we need to handle them first.
if (const auto *cxxDecl = type->getAsCXXRecordDecl()) {
uint32_t baseIndex = 0;
for (auto base : cxxDecl->bases()) {
auto *subValue = spvBuilder.createCompositeExtract(
base.getType(), value, {baseIndex++}, loc, range);
if (!writeBackOutputStream(base.getType()->getAsCXXRecordDecl(),
base.getType(), subValue, range))
return false;
}
}
const auto *structDecl = type->getAs<RecordType>()->getDecl();
// Write out each field
for (const auto *field : structDecl->fields()) {
const auto fieldType = field->getType();
auto *subValue = spvBuilder.createCompositeExtract(
fieldType, value, {getNumBaseClasses(type) + field->getFieldIndex()},
loc, range);
if (!writeBackOutputStream(field, field->getType(), subValue, range))
return false;
}
return true;
}
SpirvInstruction *
DeclResultIdMapper::invertWIfRequested(SpirvInstruction *position,
SourceLocation loc) {
// Reciprocate SV_Position.w if requested
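  // (Vulkan's FragCoord.w holds 1/w_clip whereas DX's SV_Position.w holds
  // w_clip, so the -fvk-use-dx-position-w option requests this reciprocal to
  // match DX behavior.)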
if (spirvOptions.invertW && spvContext.isPS()) {
const auto oldW = spvBuilder.createCompositeExtract(astContext.FloatTy,
position, {3}, loc);
const auto newW = spvBuilder.createBinaryOp(
spv::Op::OpFDiv, astContext.FloatTy,
spvBuilder.getConstantFloat(astContext.FloatTy, llvm::APFloat(1.0f)),
oldW, loc);
position = spvBuilder.createCompositeInsert(
astContext.getExtVectorType(astContext.FloatTy, 4), position, {3}, newW,
loc);
}
return position;
}
void DeclResultIdMapper::decorateInterpolationMode(
const NamedDecl *decl, QualType type, SpirvVariable *varInstr,
const SemanticInfo semanticInfo) {
if (varInstr->getStorageClass() != spv::StorageClass::Input &&
varInstr->getStorageClass() != spv::StorageClass::Output) {
return;
}
const bool isBaryCoord =
(semanticInfo.getKind() == hlsl::Semantic::Kind::Barycentrics);
uint32_t semanticIndex = semanticInfo.index;
if (isBaryCoord) {
    // Barycentrics inputs cannot have the attribute 'nointerpolation'.
if (decl->getAttr<HLSLNoInterpolationAttr>()) {
emitError(
"SV_BaryCentrics inputs cannot have attribute 'nointerpolation'.",
decl->getLocation());
}
    // SV_Barycentrics can only have two indices, applied to different inputs.
    // The index must be 0 or 1, and each index must map to a different
    // interpolation type.
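    // For example, a pixel shader may declare (illustrative HLSL):
    //   float3 b0 : SV_Barycentrics,                // perspective-correct
    //   noperspective float3 b1 : SV_Barycentrics1, // no perspective
    // which map to BaryCoordKHR and BaryCoordNoPerspKHR respectively (see
    // createSpirvStageVar()).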
if (semanticIndex > 1) {
emitError("The index SV_BaryCentrics semantics could only be 1 or 0.",
decl->getLocation());
} else if (noPerspBaryCentricsIndex < 2 && perspBaryCentricsIndex < 2) {
emitError(
"Cannot have more than 2 inputs with SV_BaryCentrics semantics.",
decl->getLocation());
} else if (decl->getAttr<HLSLNoPerspectiveAttr>()) {
if (noPerspBaryCentricsIndex == 2 &&
perspBaryCentricsIndex != semanticIndex) {
noPerspBaryCentricsIndex = semanticIndex;
} else {
emitError("Cannot have more than 1 noperspective inputs with "
"SV_BaryCentrics semantics.",
decl->getLocation());
}
} else {
if (perspBaryCentricsIndex == 2 &&
noPerspBaryCentricsIndex != semanticIndex) {
perspBaryCentricsIndex = semanticIndex;
} else {
emitError("Cannot have more than 1 perspective-correct inputs with "
"SV_BaryCentrics semantics.",
decl->getLocation());
}
}
}
const auto loc = decl->getLocation();
if (isUintOrVecMatOfUintType(type) || isSintOrVecMatOfSintType(type) ||
isBoolOrVecMatOfBoolType(type)) {
// TODO: Probably we can call hlsl::ValidateSignatureElement() for the
// following check.
if (decl->getAttr<HLSLLinearAttr>() || decl->getAttr<HLSLCentroidAttr>() ||
decl->getAttr<HLSLNoPerspectiveAttr>() ||
decl->getAttr<HLSLSampleAttr>()) {
emitError("only nointerpolation mode allowed for integer input "
"parameters in pixel shader or integer output in vertex shader",
decl->getLocation());
} else {
spvBuilder.decorateFlat(varInstr, loc);
}
} else {
    // Do nothing for HLSLLinearAttr since it's the default.
    // Attributes can be used together, so we cannot use 'else if'.
if (decl->getAttr<HLSLCentroidAttr>())
spvBuilder.decorateCentroid(varInstr, loc);
if (decl->getAttr<HLSLNoInterpolationAttr>() && !isBaryCoord)
spvBuilder.decorateFlat(varInstr, loc);
if (decl->getAttr<HLSLNoPerspectiveAttr>() && !isBaryCoord)
spvBuilder.decorateNoPerspective(varInstr, loc);
if (decl->getAttr<HLSLSampleAttr>()) {
spvBuilder.decorateSample(varInstr, loc);
}
}
}
SpirvVariable *DeclResultIdMapper::getBuiltinVar(spv::BuiltIn builtIn,
QualType type,
spv::StorageClass sc,
SourceLocation loc) {
// Guarantee uniqueness
uint32_t spvBuiltinId = static_cast<uint32_t>(builtIn);
const auto builtInVar = builtinToVarMap.find(spvBuiltinId);
if (builtInVar != builtinToVarMap.end()) {
return builtInVar->second;
}
  switch (builtIn) {
  case spv::BuiltIn::HelperInvocation:
  case spv::BuiltIn::SubgroupSize:
  case spv::BuiltIn::SubgroupLocalInvocationId:
    needsLegalization = true;
    break;
  default:
    break;
  }
// Create a dummy StageVar for this builtin variable
auto var = spvBuilder.addStageBuiltinVar(type, sc, builtIn,
/*isPrecise*/ false, loc);
if (spvContext.isPS() && sc == spv::StorageClass::Input) {
if (isUintOrVecMatOfUintType(type) || isSintOrVecMatOfSintType(type) ||
isBoolOrVecMatOfBoolType(type)) {
spvBuilder.decorateFlat(var, loc);
}
}
const hlsl::SigPoint *sigPoint =
hlsl::SigPoint::GetSigPoint(hlsl::SigPointFromInputQual(
hlsl::DxilParamInputQual::In, spvContext.getCurrentShaderModelKind(),
/*isPatchConstant=*/false));
StageVar stageVar(sigPoint, /*semaInfo=*/{}, /*builtinAttr=*/nullptr, type,
/*locAndComponentCount=*/{0, 0, false});
stageVar.setIsSpirvBuiltin();
stageVar.setSpirvInstr(var);
stageVars.push_back(stageVar);
// Store in map for re-use
builtinToVarMap[spvBuiltinId] = var;
return var;
}
SpirvVariable *DeclResultIdMapper::getBuiltinVar(spv::BuiltIn builtIn,
QualType type,
SourceLocation loc) {
spv::StorageClass sc = spv::StorageClass::Max;
  // Infer the storage class from the builtin; only the builtins listed below
  // are supported here.
switch (builtIn) {
case spv::BuiltIn::HelperInvocation:
case spv::BuiltIn::SubgroupSize:
case spv::BuiltIn::SubgroupLocalInvocationId:
case spv::BuiltIn::HitTNV:
case spv::BuiltIn::RayTmaxNV:
case spv::BuiltIn::RayTminNV:
case spv::BuiltIn::HitKindNV:
case spv::BuiltIn::IncomingRayFlagsNV:
case spv::BuiltIn::InstanceCustomIndexNV:
case spv::BuiltIn::RayGeometryIndexKHR:
case spv::BuiltIn::PrimitiveId:
case spv::BuiltIn::InstanceId:
case spv::BuiltIn::WorldRayDirectionNV:
case spv::BuiltIn::WorldRayOriginNV:
case spv::BuiltIn::ObjectRayDirectionNV:
case spv::BuiltIn::ObjectRayOriginNV:
case spv::BuiltIn::ObjectToWorldNV:
case spv::BuiltIn::WorldToObjectNV:
case spv::BuiltIn::LaunchIdNV:
case spv::BuiltIn::LaunchSizeNV:
case spv::BuiltIn::GlobalInvocationId:
case spv::BuiltIn::WorkgroupId:
case spv::BuiltIn::LocalInvocationIndex:
sc = spv::StorageClass::Input;
break;
case spv::BuiltIn::TaskCountNV:
case spv::BuiltIn::PrimitiveCountNV:
case spv::BuiltIn::PrimitiveIndicesNV:
case spv::BuiltIn::PrimitivePointIndicesEXT:
case spv::BuiltIn::PrimitiveLineIndicesEXT:
case spv::BuiltIn::PrimitiveTriangleIndicesEXT:
case spv::BuiltIn::CullPrimitiveEXT:
sc = spv::StorageClass::Output;
break;
default:
assert(false && "cannot infer storage class for SPIR-V builtin");
break;
}
return getBuiltinVar(builtIn, type, sc, loc);
}
SpirvFunction *
DeclResultIdMapper::getRayTracingStageVarEntryFunction(SpirvVariable *var) {
return rayTracingStageVarToEntryPoints[var];
}
SpirvVariable *DeclResultIdMapper::createSpirvStageVar(
StageVar *stageVar, const NamedDecl *decl, const llvm::StringRef name,
SourceLocation srcLoc) {
using spv::BuiltIn;
const auto sigPoint = stageVar->getSigPoint();
const auto semanticKind = stageVar->getSemanticInfo().getKind();
const auto sigPointKind = sigPoint->GetKind();
const auto type = stageVar->getAstType();
const auto isPrecise = decl->hasAttr<HLSLPreciseAttr>();
auto isNointerp = decl->hasAttr<HLSLNoInterpolationAttr>();
spv::StorageClass sc = getStorageClassForSigPoint(sigPoint);
if (sc == spv::StorageClass::Max)
    return nullptr;
stageVar->setStorageClass(sc);
// [[vk::builtin(...)]] takes precedence.
if (const auto *builtinAttr = stageVar->getBuiltInAttr()) {
const auto spvBuiltIn =
llvm::StringSwitch<BuiltIn>(builtinAttr->getBuiltIn())
.Case("PointSize", BuiltIn::PointSize)
.Case("HelperInvocation", BuiltIn::HelperInvocation)
.Case("BaseVertex", BuiltIn::BaseVertex)
.Case("BaseInstance", BuiltIn::BaseInstance)
.Case("DrawIndex", BuiltIn::DrawIndex)
.Case("DeviceIndex", BuiltIn::DeviceIndex)
.Case("ViewportMaskNV", BuiltIn::ViewportMaskNV)
.Default(BuiltIn::Max);
assert(spvBuiltIn != BuiltIn::Max); // The frontend should guarantee this.
if (spvBuiltIn == BuiltIn::HelperInvocation &&
!featureManager.isTargetEnvVulkan1p3OrAbove()) {
    // If [[vk::builtin("HelperInvocation")]] is used for Vulkan 1.2 or
    // earlier, we enable the SPV_EXT_demote_to_helper_invocation extension to
    // use the OpIsHelperInvocationEXT instruction.
featureManager.allowExtension("SPV_EXT_demote_to_helper_invocation");
return spvBuilder.addVarForHelperInvocation(type, isPrecise, srcLoc);
}
return spvBuilder.addStageBuiltinVar(type, sc, spvBuiltIn, isPrecise,
srcLoc);
}
// The following translation assumes that semantic validity in the current
// shader model is already checked, so it only covers valid SigPoints for
// each semantic.
switch (semanticKind) {
// According to DXIL spec, the Position SV can be used by all SigPoints
// other than PCIn, HSIn, GSIn, PSOut, CSIn, MSIn, MSPOut, ASIn.
// According to Vulkan spec, the Position BuiltIn can only be used
// by VSOut, HS/DS/GS In/Out, MSOut.
case hlsl::Semantic::Kind::Position: {
if (sigPointKind == hlsl::SigPoint::Kind::VSOut &&
!containOnlyVecWithFourFloats(
type, theEmitter.getSpirvOptions().enable16BitTypes)) {
emitError("SV_Position must be a 4-component 32-bit float vector or a "
"composite which recursively contains only such a vector",
srcLoc);
}
switch (sigPointKind) {
case hlsl::SigPoint::Kind::VSIn:
case hlsl::SigPoint::Kind::PCOut:
case hlsl::SigPoint::Kind::DSIn:
return spvBuilder.addStageIOVar(type, sc, name.str(), isPrecise,
isNointerp, srcLoc);
case hlsl::SigPoint::Kind::VSOut:
case hlsl::SigPoint::Kind::HSCPIn:
case hlsl::SigPoint::Kind::HSCPOut:
case hlsl::SigPoint::Kind::DSCPIn:
case hlsl::SigPoint::Kind::DSOut:
case hlsl::SigPoint::Kind::GSVIn:
case hlsl::SigPoint::Kind::GSOut:
case hlsl::SigPoint::Kind::MSOut:
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::Position,
isPrecise, srcLoc);
case hlsl::SigPoint::Kind::PSIn:
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::FragCoord,
isPrecise, srcLoc);
default:
llvm_unreachable("invalid usage of SV_Position sneaked in");
}
}
// According to DXIL spec, the VertexID SV can only be used by VSIn.
// According to Vulkan spec, the VertexIndex BuiltIn can only be used by
// VSIn.
case hlsl::Semantic::Kind::VertexID: {
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::VertexIndex,
isPrecise, srcLoc);
}
// According to DXIL spec, the InstanceID SV can be used by VSIn, VSOut,
// HSCPIn, HSCPOut, DSCPIn, DSOut, GSVIn, GSOut, PSIn.
  // According to Vulkan spec, the InstanceIndex BuiltIn can only be used by
// VSIn.
case hlsl::Semantic::Kind::InstanceID: {
switch (sigPointKind) {
case hlsl::SigPoint::Kind::VSIn:
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::InstanceIndex,
isPrecise, srcLoc);
case hlsl::SigPoint::Kind::VSOut:
case hlsl::SigPoint::Kind::HSCPIn:
case hlsl::SigPoint::Kind::HSCPOut:
case hlsl::SigPoint::Kind::DSCPIn:
case hlsl::SigPoint::Kind::DSOut:
case hlsl::SigPoint::Kind::GSVIn:
case hlsl::SigPoint::Kind::GSOut:
case hlsl::SigPoint::Kind::PSIn:
return spvBuilder.addStageIOVar(type, sc, name.str(), isPrecise,
isNointerp, srcLoc);
default:
llvm_unreachable("invalid usage of SV_InstanceID sneaked in");
}
}
// According to DXIL spec, the StartVertexLocation SV can only be used by
// VSIn. According to Vulkan spec, the BaseVertex BuiltIn can only be used by
// VSIn.
case hlsl::Semantic::Kind::StartVertexLocation: {
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::BaseVertex,
isPrecise, srcLoc);
}
// According to DXIL spec, the StartInstanceLocation SV can only be used by
// VSIn. According to Vulkan spec, the BaseInstance BuiltIn can only be used
// by VSIn.
case hlsl::Semantic::Kind::StartInstanceLocation: {
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::BaseInstance,
isPrecise, srcLoc);
}
// According to DXIL spec, the Depth{|GreaterEqual|LessEqual} SV can only be
// used by PSOut.
// According to Vulkan spec, the FragDepth BuiltIn can only be used by PSOut.
case hlsl::Semantic::Kind::Depth:
case hlsl::Semantic::Kind::DepthGreaterEqual:
case hlsl::Semantic::Kind::DepthLessEqual: {
stageVar->setIsSpirvBuiltin();
// Vulkan requires the DepthReplacing execution mode to write to FragDepth.
spvBuilder.addExecutionMode(entryFunction,
spv::ExecutionMode::DepthReplacing, {}, srcLoc);
if (semanticKind == hlsl::Semantic::Kind::DepthGreaterEqual)
spvBuilder.addExecutionMode(entryFunction,
spv::ExecutionMode::DepthGreater, {}, srcLoc);
else if (semanticKind == hlsl::Semantic::Kind::DepthLessEqual)
spvBuilder.addExecutionMode(entryFunction, spv::ExecutionMode::DepthLess,
{}, srcLoc);
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::FragDepth,
isPrecise, srcLoc);
}
// According to DXIL spec, the ClipDistance/CullDistance SV can be used by all
// SigPoints other than PCIn, HSIn, GSIn, PSOut, CSIn, MSIn, MSPOut, ASIn.
// According to Vulkan spec, the ClipDistance/CullDistance
// BuiltIn can only be used by VSOut, HS/DS/GS In/Out, MSOut.
case hlsl::Semantic::Kind::ClipDistance:
case hlsl::Semantic::Kind::CullDistance: {
switch (sigPointKind) {
case hlsl::SigPoint::Kind::VSIn:
case hlsl::SigPoint::Kind::PCOut:
case hlsl::SigPoint::Kind::DSIn:
return spvBuilder.addStageIOVar(type, sc, name.str(), isPrecise,
isNointerp, srcLoc);
case hlsl::SigPoint::Kind::VSOut:
case hlsl::SigPoint::Kind::HSCPIn:
case hlsl::SigPoint::Kind::HSCPOut:
case hlsl::SigPoint::Kind::DSCPIn:
case hlsl::SigPoint::Kind::DSOut:
case hlsl::SigPoint::Kind::GSVIn:
case hlsl::SigPoint::Kind::GSOut:
case hlsl::SigPoint::Kind::PSIn:
case hlsl::SigPoint::Kind::MSOut:
llvm_unreachable("should be handled in gl_PerVertex struct");
default:
llvm_unreachable(
"invalid usage of SV_ClipDistance/SV_CullDistance sneaked in");
}
}
// According to DXIL spec, the IsFrontFace SV can only be used by GSOut and
// PSIn.
  // According to Vulkan spec, the FrontFacing BuiltIn can only be used in PSIn.
case hlsl::Semantic::Kind::IsFrontFace: {
switch (sigPointKind) {
case hlsl::SigPoint::Kind::GSOut:
return spvBuilder.addStageIOVar(type, sc, name.str(), isPrecise,
isNointerp, srcLoc);
case hlsl::SigPoint::Kind::PSIn:
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::FrontFacing,
isPrecise, srcLoc);
default:
llvm_unreachable("invalid usage of SV_IsFrontFace sneaked in");
}
}
// According to DXIL spec, the Target SV can only be used by PSOut.
// There is no corresponding builtin decoration in SPIR-V. So generate normal
// Vulkan stage input/output variables.
case hlsl::Semantic::Kind::Target:
// An arbitrary semantic is defined by users. Generate normal Vulkan stage
// input/output variables.
case hlsl::Semantic::Kind::Arbitrary: {
return spvBuilder.addStageIOVar(type, sc, name.str(), isPrecise, isNointerp,
srcLoc);
// TODO: patch constant function in hull shader
}
// According to DXIL spec, the DispatchThreadID SV can only be used by CSIn.
// According to Vulkan spec, the GlobalInvocationId can only be used in CSIn.
case hlsl::Semantic::Kind::DispatchThreadID: {
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::GlobalInvocationId,
isPrecise, srcLoc);
}
// According to DXIL spec, the GroupID SV can only be used by CSIn.
// According to Vulkan spec, the WorkgroupId can only be used in CSIn.
case hlsl::Semantic::Kind::GroupID: {
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::WorkgroupId,
isPrecise, srcLoc);
}
// According to DXIL spec, the GroupThreadID SV can only be used by CSIn.
// According to Vulkan spec, the LocalInvocationId can only be used in CSIn.
case hlsl::Semantic::Kind::GroupThreadID: {
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::LocalInvocationId,
isPrecise, srcLoc);
}
// According to DXIL spec, the GroupIndex SV can only be used by CSIn.
// According to Vulkan spec, the LocalInvocationIndex can only be used in
// CSIn.
case hlsl::Semantic::Kind::GroupIndex: {
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(
type, sc, BuiltIn::LocalInvocationIndex, isPrecise, srcLoc);
}
// According to DXIL spec, the OutputControlID SV can only be used by HSIn.
// According to Vulkan spec, the InvocationId BuiltIn can only be used in
// HS/GS In.
case hlsl::Semantic::Kind::OutputControlPointID: {
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::InvocationId,
isPrecise, srcLoc);
}
// According to DXIL spec, the PrimitiveID SV can only be used by PCIn, HSIn,
// DSIn, GSIn, GSOut, PSIn, and MSPOut.
// According to Vulkan spec, the PrimitiveId BuiltIn can only be used in
// HS/DS/PS In, GS In/Out, MSPOut.
case hlsl::Semantic::Kind::PrimitiveID: {
// Translate to PrimitiveId BuiltIn for all valid SigPoints.
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::PrimitiveId,
isPrecise, srcLoc);
}
// According to DXIL spec, the TessFactor SV can only be used by PCOut and
// DSIn.
// According to Vulkan spec, the TessLevelOuter BuiltIn can only be used in
// PCOut and DSIn.
case hlsl::Semantic::Kind::TessFactor: {
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::TessLevelOuter,
isPrecise, srcLoc);
}
// According to DXIL spec, the InsideTessFactor SV can only be used by PCOut
// and DSIn.
// According to Vulkan spec, the TessLevelInner BuiltIn can only be used in
// PCOut and DSIn.
case hlsl::Semantic::Kind::InsideTessFactor: {
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::TessLevelInner,
isPrecise, srcLoc);
}
// According to DXIL spec, the DomainLocation SV can only be used by DSIn.
// According to Vulkan spec, the TessCoord BuiltIn can only be used in DSIn.
case hlsl::Semantic::Kind::DomainLocation: {
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::TessCoord,
isPrecise, srcLoc);
}
// According to DXIL spec, the GSInstanceID SV can only be used by GSIn.
// According to Vulkan spec, the InvocationId BuiltIn can only be used in
// HS/GS In.
case hlsl::Semantic::Kind::GSInstanceID: {
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::InvocationId,
isPrecise, srcLoc);
}
// According to DXIL spec, the SampleIndex SV can only be used by PSIn.
// According to Vulkan spec, the SampleId BuiltIn can only be used in PSIn.
case hlsl::Semantic::Kind::SampleIndex: {
setInterlockExecutionMode(spv::ExecutionMode::SampleInterlockOrderedEXT);
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::SampleId, isPrecise,
srcLoc);
}
// According to DXIL spec, the StencilRef SV can only be used by PSOut.
case hlsl::Semantic::Kind::StencilRef: {
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::FragStencilRefEXT,
isPrecise, srcLoc);
}
// According to DXIL spec, the Barycentrics SV can only be used by PSIn.
case hlsl::Semantic::Kind::Barycentrics: {
stageVar->setIsSpirvBuiltin();
// Selecting the correct builtin according to interpolation mode
auto bi = BuiltIn::Max;
if (decl->hasAttr<HLSLNoPerspectiveAttr>()) {
bi = BuiltIn::BaryCoordNoPerspKHR;
} else {
bi = BuiltIn::BaryCoordKHR;
}
return spvBuilder.addStageBuiltinVar(type, sc, bi, isPrecise, srcLoc);
}
// According to DXIL spec, the RenderTargetArrayIndex SV can only be used by
// VSIn, VSOut, HSCPIn, HSCPOut, DSIn, DSOut, GSVIn, GSOut, PSIn, MSPOut.
// According to Vulkan spec, the Layer BuiltIn can only be used in GSOut
// PSIn, and MSPOut.
case hlsl::Semantic::Kind::RenderTargetArrayIndex: {
switch (sigPointKind) {
case hlsl::SigPoint::Kind::VSIn:
case hlsl::SigPoint::Kind::HSCPIn:
case hlsl::SigPoint::Kind::HSCPOut:
case hlsl::SigPoint::Kind::PCOut:
case hlsl::SigPoint::Kind::DSIn:
case hlsl::SigPoint::Kind::DSCPIn:
case hlsl::SigPoint::Kind::GSVIn:
return spvBuilder.addStageIOVar(type, sc, name.str(), isPrecise,
isNointerp, srcLoc);
case hlsl::SigPoint::Kind::VSOut:
case hlsl::SigPoint::Kind::DSOut:
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::Layer, isPrecise,
srcLoc);
case hlsl::SigPoint::Kind::GSOut:
case hlsl::SigPoint::Kind::PSIn:
case hlsl::SigPoint::Kind::MSPOut:
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::Layer, isPrecise,
srcLoc);
default:
llvm_unreachable("invalid usage of SV_RenderTargetArrayIndex sneaked in");
}
}
// According to DXIL spec, the ViewportArrayIndex SV can only be used by
// VSIn, VSOut, HSCPIn, HSCPOut, DSIn, DSOut, GSVIn, GSOut, PSIn, MSPOut.
// According to Vulkan spec, the ViewportIndex BuiltIn can only be used in
// GSOut, PSIn, and MSPOut.
case hlsl::Semantic::Kind::ViewPortArrayIndex: {
switch (sigPointKind) {
case hlsl::SigPoint::Kind::VSIn:
case hlsl::SigPoint::Kind::HSCPIn:
case hlsl::SigPoint::Kind::HSCPOut:
case hlsl::SigPoint::Kind::PCOut:
case hlsl::SigPoint::Kind::DSIn:
case hlsl::SigPoint::Kind::DSCPIn:
case hlsl::SigPoint::Kind::GSVIn:
return spvBuilder.addStageIOVar(type, sc, name.str(), isPrecise,
isNointerp, srcLoc);
case hlsl::SigPoint::Kind::VSOut:
case hlsl::SigPoint::Kind::DSOut:
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::ViewportIndex,
isPrecise, srcLoc);
case hlsl::SigPoint::Kind::GSOut:
case hlsl::SigPoint::Kind::PSIn:
case hlsl::SigPoint::Kind::MSPOut:
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::ViewportIndex,
isPrecise, srcLoc);
default:
llvm_unreachable("invalid usage of SV_ViewportArrayIndex sneaked in");
}
}
// According to DXIL spec, the Coverage SV can only be used by PSIn and PSOut.
// According to Vulkan spec, the SampleMask BuiltIn can only be used in
// PSIn and PSOut.
case hlsl::Semantic::Kind::Coverage: {
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::SampleMask,
isPrecise, srcLoc);
}
// According to DXIL spec, the ViewID SV can only be used by VSIn, PCIn,
// HSIn, DSIn, GSIn, PSIn.
// According to Vulkan spec, the ViewIndex BuiltIn can only be used in
// VS/HS/DS/GS/PS input.
case hlsl::Semantic::Kind::ViewID: {
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::ViewIndex,
isPrecise, srcLoc);
}
// According to DXIL spec, the InnerCoverage SV can only be used as PSIn.
// According to Vulkan spec, the FullyCoveredEXT BuiltIn can only be used as
// PSIn.
case hlsl::Semantic::Kind::InnerCoverage: {
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::FullyCoveredEXT,
isPrecise, srcLoc);
}
  // According to DXIL spec, the ShadingRate SV can only be used by GSOut,
  // VSOut, or PSIn. According to Vulkan spec, the ShadingRateKHR BuiltIn can
  // only be used as PSIn, and PrimitiveShadingRateKHR as VSOut, GSOut, or
  // MSOut.
case hlsl::Semantic::Kind::ShadingRate: {
setInterlockExecutionMode(
spv::ExecutionMode::ShadingRateInterlockOrderedEXT);
switch (sigPointKind) {
case hlsl::SigPoint::Kind::PSIn:
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::ShadingRateKHR,
isPrecise, srcLoc);
case hlsl::SigPoint::Kind::VSOut:
case hlsl::SigPoint::Kind::GSOut:
case hlsl::SigPoint::Kind::MSOut:
case hlsl::SigPoint::Kind::MSPOut:
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(
type, sc, BuiltIn::PrimitiveShadingRateKHR, isPrecise, srcLoc);
default:
emitError("semantic ShadingRate must be used only for PSIn, VSOut, "
"GSOut, MSOut",
srcLoc);
break;
}
break;
}
  // According to DXIL spec, the CullPrimitive SV can only be used by
  // MSPOut or PSIn.
  // According to Vulkan spec, the CullPrimitiveEXT BuiltIn can only
  // be used as MSOut.
case hlsl::Semantic::Kind::CullPrimitive: {
switch (sigPointKind) {
case hlsl::SigPoint::Kind::PSIn:
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::CullPrimitiveEXT,
isPrecise, srcLoc);
case hlsl::SigPoint::Kind::MSPOut:
stageVar->setIsSpirvBuiltin();
return spvBuilder.addStageBuiltinVar(type, sc, BuiltIn::CullPrimitiveEXT,
isPrecise, srcLoc);
default:
emitError("semantic CullPrimitive must be used only for PSIn, MSPOut",
srcLoc);
break;
}
break;
}
default:
emitError("semantic %0 unimplemented", srcLoc)
<< stageVar->getSemanticStr();
break;
}
  return nullptr;
}
spv::StorageClass
DeclResultIdMapper::getStorageClassForSigPoint(const hlsl::SigPoint *sigPoint) {
// This translation is done based on the HLSL reference (see docs/dxil.rst).
const auto sigPointKind = sigPoint->GetKind();
const auto signatureKind = sigPoint->GetSignatureKind();
spv::StorageClass sc = spv::StorageClass::Max;
switch (signatureKind) {
case hlsl::DXIL::SignatureKind::Input:
sc = spv::StorageClass::Input;
break;
case hlsl::DXIL::SignatureKind::Output:
sc = spv::StorageClass::Output;
break;
case hlsl::DXIL::SignatureKind::Invalid: {
// There are some special cases in HLSL (See docs/dxil.rst):
// SignatureKind is "invalid" for PCIn, HSIn, GSIn, and CSIn.
switch (sigPointKind) {
case hlsl::DXIL::SigPointKind::PCIn:
case hlsl::DXIL::SigPointKind::HSIn:
case hlsl::DXIL::SigPointKind::GSIn:
case hlsl::DXIL::SigPointKind::CSIn:
case hlsl::DXIL::SigPointKind::MSIn:
case hlsl::DXIL::SigPointKind::ASIn:
sc = spv::StorageClass::Input;
break;
default:
llvm_unreachable("Found invalid SigPoint kind for semantic");
}
break;
}
case hlsl::DXIL::SignatureKind::PatchConstOrPrim: {
// There are some special cases in HLSL (See docs/dxil.rst):
// SignatureKind is "PatchConstOrPrim" for PCOut, MSPOut and DSIn.
switch (sigPointKind) {
case hlsl::DXIL::SigPointKind::PCOut:
case hlsl::DXIL::SigPointKind::MSPOut:
// Patch Constant Output (Output of Hull which is passed to Domain).
// Mesh Shader per-primitive output attributes.
sc = spv::StorageClass::Output;
break;
case hlsl::DXIL::SigPointKind::DSIn:
// Domain Shader regular input - Patch Constant data plus system values.
sc = spv::StorageClass::Input;
break;
default:
llvm_unreachable("Found invalid SigPoint kind for semantic");
}
break;
}
default:
llvm_unreachable("Found invalid SigPoint kind for semantic");
}
return sc;
}
QualType DeclResultIdMapper::getTypeAndCreateCounterForPotentialAliasVar(
const DeclaratorDecl *decl, bool *shouldBeAlias) {
if (const auto *varDecl = dyn_cast<VarDecl>(decl)) {
// This method is only intended to be used to create SPIR-V variables in the
// Function or Private storage class.
assert(!SpirvEmitter::isExternalVar(varDecl));
}
const QualType type = getTypeOrFnRetType(decl);
// Whether we should generate this decl as an alias variable.
bool genAlias = false;
// For ConstantBuffers, TextureBuffers, StructuredBuffers, ByteAddressBuffers
if (isConstantTextureBuffer(type) ||
isOrContainsAKindOfStructuredOrByteBuffer(type)) {
genAlias = true;
}
// Return via parameter whether alias was generated.
if (shouldBeAlias)
*shouldBeAlias = genAlias;
if (genAlias) {
needsLegalization = true;
createCounterVarForDecl(decl);
}
return type;
}
bool DeclResultIdMapper::getImplicitRegisterType(const ResourceVar &var,
char *registerTypeOut) const {
assert(registerTypeOut);
if (var.getSpirvInstr()) {
if (var.getSpirvInstr()->hasAstResultType()) {
QualType type = var.getSpirvInstr()->getAstResultType();
// Strip outer arrayness first
while (type->isArrayType())
type = type->getAsArrayTypeUnsafe()->getElementType();
// t - for shader resource views (SRV)
if (isTexture(type) || isNonWritableStructuredBuffer(type) ||
isByteAddressBuffer(type) || isBuffer(type)) {
*registerTypeOut = 't';
return true;
}
// s - for samplers
else if (isSampler(type)) {
*registerTypeOut = 's';
return true;
}
// u - for unordered access views (UAV)
else if (isRWByteAddressBuffer(type) || isRWAppendConsumeSBuffer(type) ||
isRWBuffer(type) || isRWTexture(type)) {
*registerTypeOut = 'u';
return true;
}
// b - for constant buffer
// views (CBV)
else if (isConstantBuffer(type)) {
*registerTypeOut = 'b';
return true;
}
} else {
llvm::StringRef hlslUserType = var.getSpirvInstr()->getHlslUserType();
// b - for constant buffer views (CBV)
if (var.isGlobalsBuffer() || hlslUserType == "cbuffer" ||
hlslUserType == "ConstantBuffer") {
*registerTypeOut = 'b';
return true;
}
if (hlslUserType == "tbuffer") {
*registerTypeOut = 't';
return true;
}
}
}
*registerTypeOut = '\0';
return false;
}
SpirvVariable *
DeclResultIdMapper::createRayTracingNVStageVar(spv::StorageClass sc,
const VarDecl *decl) {
return createRayTracingNVStageVar(sc, decl->getType(), decl->getName().str(),
decl->hasAttr<HLSLPreciseAttr>(),
decl->hasAttr<HLSLNoInterpolationAttr>());
}
SpirvVariable *DeclResultIdMapper::createRayTracingNVStageVar(
spv::StorageClass sc, QualType type, std::string name, bool isPrecise,
bool isNointerp) {
SpirvVariable *retVal = nullptr;
  // Raytracing interface variables are special since they do not participate
  // in any interface matching; hence we do not create a StageVar for them or
  // track them in the stageVars vector.
switch (sc) {
case spv::StorageClass::IncomingRayPayloadNV:
case spv::StorageClass::IncomingCallableDataNV:
case spv::StorageClass::HitAttributeNV:
case spv::StorageClass::RayPayloadNV:
case spv::StorageClass::CallableDataNV:
retVal = spvBuilder.addModuleVar(type, sc, isPrecise, isNointerp, name);
break;
default:
assert(false && "Unsupported SPIR-V storage class for raytracing");
}
rayTracingStageVarToEntryPoints[retVal] = entryFunction;
return retVal;
}
void DeclResultIdMapper::tryToCreateImplicitConstVar(const ValueDecl *decl) {
const VarDecl *varDecl = dyn_cast<VarDecl>(decl);
if (!varDecl || !varDecl->isImplicit())
return;
APValue *val = varDecl->evaluateValue();
if (!val)
return;
SpirvInstruction *constVal =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, val->getInt());
constVal->setRValue(true);
astDecls[varDecl].instr = constVal;
}
void DeclResultIdMapper::decorateWithIntrinsicAttrs(
const NamedDecl *decl, SpirvVariable *varInst,
llvm::function_ref<void(VKDecorateExtAttr *)> extraFunctionForDecoAttr) {
if (!decl->hasAttrs())
return;
// TODO: Handle member field in a struct and function parameter.
for (auto &attr : decl->getAttrs()) {
if (auto decoAttr = dyn_cast<VKDecorateExtAttr>(attr)) {
spvBuilder.decorateWithLiterals(
varInst, decoAttr->getDecorate(),
{decoAttr->literals_begin(), decoAttr->literals_end()},
varInst->getSourceLocation());
extraFunctionForDecoAttr(decoAttr);
continue;
}
if (auto decoAttr = dyn_cast<VKDecorateIdExtAttr>(attr)) {
llvm::SmallVector<SpirvInstruction *, 2> args;
for (Expr *arg : decoAttr->arguments()) {
args.push_back(theEmitter.doExpr(arg));
}
spvBuilder.decorateWithIds(varInst, decoAttr->getDecorate(), args,
varInst->getSourceLocation());
continue;
}
if (auto decoAttr = dyn_cast<VKDecorateStringExtAttr>(attr)) {
llvm::SmallVector<llvm::StringRef, 2> args(decoAttr->arguments_begin(),
decoAttr->arguments_end());
spvBuilder.decorateWithStrings(varInst, decoAttr->getDecorate(), args,
varInst->getSourceLocation());
continue;
}
}
}
void DeclResultIdMapper::decorateStageVarWithIntrinsicAttrs(
const NamedDecl *decl, StageVar *stageVar, SpirvVariable *varInst) {
auto checkBuiltInLocationDecoration =
[stageVar](const VKDecorateExtAttr *decoAttr) {
auto decorate = static_cast<spv::Decoration>(decoAttr->getDecorate());
if (decorate == spv::Decoration::BuiltIn ||
decorate == spv::Decoration::Location) {
// This information will be used to avoid
// assigning multiple location decorations
// in finalizeStageIOLocations()
stageVar->setIsLocOrBuiltinDecorateAttr();
}
};
decorateWithIntrinsicAttrs(decl, varInst, checkBuiltInLocationDecoration);
}
void DeclResultIdMapper::setInterlockExecutionMode(spv::ExecutionMode mode) {
interlockExecutionMode = mode;
}
spv::ExecutionMode DeclResultIdMapper::getInterlockExecutionMode() {
return interlockExecutionMode.getValueOr(
spv::ExecutionMode::PixelInterlockOrderedEXT);
}
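// For each output control point, builds a pointer into the local OutputPatch
// temporary and copies the values of the corresponding hull shader output
// stage variables into it.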
void DeclResultIdMapper::copyHullOutStageVarsToOutputPatch(
SpirvInstruction *hullMainOutputPatch, const ParmVarDecl *outputPatchDecl,
QualType outputControlPointType, uint32_t numOutputControlPoints) {
for (uint32_t outputCtrlPoint = 0; outputCtrlPoint < numOutputControlPoints;
++outputCtrlPoint) {
SpirvConstant *index = spvBuilder.getConstantInt(
astContext.UnsignedIntTy, llvm::APInt(32, outputCtrlPoint));
auto *tempLocation = spvBuilder.createAccessChain(
outputControlPointType, hullMainOutputPatch, {index}, /*loc=*/{});
storeOutStageVarsToStorage(cast<DeclaratorDecl>(outputPatchDecl), index,
outputControlPointType, tempLocation);
}
}
void DeclResultIdMapper::storeOutStageVarsToStorage(
const DeclaratorDecl *outputPatchDecl, SpirvConstant *ctrlPointID,
QualType outputControlPointType, SpirvInstruction *ptr) {
if (!outputControlPointType->isStructureType()) {
const auto found = stageVarInstructions.find(outputPatchDecl);
    if (found == stageVarInstructions.end()) {
      emitError("Shader output variable '%0' was not created", {})
          << outputPatchDecl->getName();
      // Bail out: dereferencing an invalid iterator below would be undefined
      // behavior.
      return;
    }
auto *ptrToOutputStageVar = spvBuilder.createAccessChain(
outputControlPointType, found->second, {ctrlPointID}, /*loc=*/{});
auto *load =
spvBuilder.createLoad(outputControlPointType, ptrToOutputStageVar,
/*loc=*/{});
spvBuilder.createStore(ptr, load, /*loc=*/{});
return;
}
const auto *recordType = outputControlPointType->getAs<RecordType>();
assert(recordType != nullptr);
const auto *structDecl = recordType->getDecl();
assert(structDecl != nullptr);
uint32_t index = 0;
for (const auto *field : structDecl->fields()) {
SpirvConstant *indexInst = spvBuilder.getConstantInt(
astContext.UnsignedIntTy, llvm::APInt(32, index));
auto *tempLocation = spvBuilder.createAccessChain(field->getType(), ptr,
{indexInst}, /*loc=*/{});
storeOutStageVarsToStorage(cast<DeclaratorDecl>(field), ctrlPointID,
field->getType(), tempLocation);
++index;
}
}
void DeclResultIdMapper::registerCapabilitiesAndExtensionsForType(
const TypedefType *type) {
for (const auto *decl : typeAliasesWithAttributes) {
if (type == decl->getTypeForDecl()) {
for (auto *attribute : decl->specific_attrs<VKExtensionExtAttr>()) {
clang::StringRef extensionName = attribute->getName();
spvBuilder.requireExtension(extensionName, decl->getLocation());
}
for (auto *attribute : decl->specific_attrs<VKCapabilityExtAttr>()) {
spv::Capability cap = spv::Capability(attribute->getCapability());
spvBuilder.requireCapability(cap, decl->getLocation());
}
}
}
}
} // end namespace spirv
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/EmitVisitor.cpp | //===--- EmitVisitor.cpp - SPIR-V Emit Visitor Implementation ----*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// Do not change the inclusion order between "dxc/Support/*" files.
// clang-format off
#include "EmitVisitor.h"
#include "dxc/Support/Global.h"
#include "dxc/Support/WinIncludes.h"
#include "dxc/Support/dxcapi.use.h"
#include "dxc/Support/HLSLOptions.h"
#include "dxc/Support/FileIOHelper.h"
#include "clang/SPIRV/BitwiseCast.h"
#include "clang/SPIRV/SpirvBasicBlock.h"
#include "clang/SPIRV/SpirvFunction.h"
#include "clang/SPIRV/SpirvInstruction.h"
#include "clang/SPIRV/SpirvType.h"
#include "clang/SPIRV/String.h"
// clang-format on
#include <functional>
namespace clang {
namespace spirv {
namespace {
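// Conservative limits on the number of source-text characters packed into a
// single OpSource / OpSourceContinued instruction, keeping the encoded
// instruction within SPIR-V's 16-bit word-count limit.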
static const uint32_t kMaximumCharOpSource = 0xFFFA;
static const uint32_t kMaximumCharOpSourceContinued = 0xFFFD;
// OpSource has no result id, so this sentinel value marks that the OpSource
// for a given file has already been emitted.
static const uint32_t kEmittedSourceForOpSource = 1;
/// Chops the given original string into multiple smaller ones to make sure they
/// can be encoded in a sequence of OpSourceContinued instructions following an
/// OpSource instruction.
void chopString(llvm::StringRef original,
llvm::SmallVectorImpl<std::string> *chopped,
uint32_t maxCharInOpSource, uint32_t maxCharInContinue) {
chopped->clear();
if (original.size() > maxCharInOpSource) {
chopped->push_back(llvm::StringRef(original.data(), maxCharInOpSource));
original = llvm::StringRef(original.data() + maxCharInOpSource,
original.size() - maxCharInOpSource);
while (original.size() > maxCharInContinue) {
chopped->push_back(llvm::StringRef(original.data(), maxCharInContinue));
original = llvm::StringRef(original.data() + maxCharInContinue,
original.size() - maxCharInContinue);
}
if (!original.empty()) {
chopped->push_back(original);
}
} else if (!original.empty()) {
chopped->push_back(original);
}
}
/// Returns true if an OpLine instruction can be emitted for the given opcode.
/// According to the SPIR-V spec section 2.4 (Logical Layout of a Module), the
/// first section in which OpLine debug information may appear comes after all
/// annotation instructions.
bool isOpLineLegalForOp(spv::Op op) {
switch (op) {
// Preamble binary
case spv::Op::OpCapability:
case spv::Op::OpExtension:
case spv::Op::OpExtInstImport:
case spv::Op::OpMemoryModel:
case spv::Op::OpEntryPoint:
case spv::Op::OpExecutionMode:
case spv::Op::OpExecutionModeId:
// Debug binary
case spv::Op::OpString:
case spv::Op::OpSource:
case spv::Op::OpSourceExtension:
case spv::Op::OpSourceContinued:
case spv::Op::OpName:
case spv::Op::OpMemberName:
// Annotation binary
case spv::Op::OpModuleProcessed:
case spv::Op::OpDecorate:
case spv::Op::OpDecorateId:
case spv::Op::OpMemberDecorate:
case spv::Op::OpGroupDecorate:
case spv::Op::OpGroupMemberDecorate:
case spv::Op::OpDecorationGroup:
case spv::Op::OpDecorateStringGOOGLE:
case spv::Op::OpMemberDecorateStringGOOGLE:
return false;
default:
return true;
}
}
/// Returns true if a DebugLine instruction can be emitted for the given
/// opcode. Because DebugLine is a nonsemantic OpExtInst, there are several
/// additional opcodes it cannot precede beyond those already illegal for
/// OpLine. Assumes opcodes illegal for OpLine have already been eliminated.
bool isDebugLineLegalForOp(spv::Op op) {
switch (op) {
case spv::Op::OpFunction:
case spv::Op::OpFunctionParameter:
case spv::Op::OpLabel:
case spv::Op::OpVariable:
case spv::Op::OpPhi:
return false;
default:
return true;
}
}
// Returns the SPIR-V version that will be written to the SPIR-V header
// section.
uint32_t getHeaderVersion(spv_target_env env) {
if (env >= SPV_ENV_UNIVERSAL_1_6)
return 0x00010600u;
if (env >= SPV_ENV_UNIVERSAL_1_5)
return 0x00010500u;
if (env >= SPV_ENV_UNIVERSAL_1_4)
return 0x00010400u;
if (env >= SPV_ENV_UNIVERSAL_1_3)
return 0x00010300u;
if (env >= SPV_ENV_UNIVERSAL_1_2)
return 0x00010200u;
if (env >= SPV_ENV_UNIVERSAL_1_1)
return 0x00010100u;
return 0x00010000u;
}
// Reads the file at |filePath| and returns its contents as a string.
// DebugSource uses this function to obtain its source text.
std::string
ReadSourceCode(llvm::StringRef filePath,
const clang::spirv::SpirvCodeGenOptions &spvOptions) {
try {
dxc::DxcDllSupport dllSupport;
IFT(dllSupport.Initialize());
CComPtr<IDxcLibrary> pLibrary;
IFT(dllSupport.CreateInstance(CLSID_DxcLibrary, &pLibrary));
CComPtr<IDxcBlobEncoding> pSource;
std::wstring srcFile(filePath.begin(), filePath.end());
IFT(pLibrary->CreateBlobFromFile(srcFile.c_str(), nullptr, &pSource));
CComPtr<IDxcBlobUtf8> utf8Source;
IFT(hlsl::DxcGetBlobAsUtf8(pSource, nullptr, &utf8Source));
return std::string(utf8Source->GetStringPointer(),
utf8Source->GetStringLength());
} catch (...) {
    // An exception occurred while reading the file; fall back to the
    // original source (which may have been supplied directly).
if (!spvOptions.origSource.empty()) {
return spvOptions.origSource.c_str();
}
return "";
}
}
// Returns a vector of strings after chopping the source of |inst| to respect
// the operand size limit of OpSource.
llvm::SmallVector<std::string, 2>
getChoppedSourceCode(SpirvSource *inst,
const clang::spirv::SpirvCodeGenOptions &spvOptions) {
std::string text = ReadSourceCode(inst->getFile()->getString(), spvOptions);
if (text.empty()) {
text = inst->getSource().str();
}
llvm::SmallVector<std::string, 2> choppedSrcCode;
if (!text.empty()) {
chopString(text, &choppedSrcCode, kMaximumCharOpSource,
kMaximumCharOpSourceContinued);
}
return choppedSrcCode;
}
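// The SPIR-V header's generator word packs the registered generator number in
// its high 16 bits (14 is Google "spiregg", the number registered for DXC's
// SPIR-V backend) and the tool version in its low 16 bits.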
constexpr uint32_t kGeneratorNumber = 14;
constexpr uint32_t kToolVersion = 0;
} // anonymous namespace
EmitVisitor::Header::Header(uint32_t bound_, uint32_t version_)
    // We are using the unified header, which reports spv::Version as the
    // newest version, but we need to stick to 1.0 for Vulkan consumption by
    // default.
: magicNumber(spv::MagicNumber), version(version_),
generator((kGeneratorNumber << 16) | kToolVersion), bound(bound_),
reserved(0) {}
EmitVisitor::~EmitVisitor() {
for (auto *i : spvInstructions)
i->releaseMemory();
}
template <>
uint32_t
EmitVisitor::getOrAssignResultId<SpirvInstruction>(SpirvInstruction *obj) {
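  // Strings are deduplicated: if an identical OpString has already been
  // assigned a result id, reuse that id instead of minting a new one.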
auto *str = dyn_cast<SpirvString>(obj);
if (str != nullptr) {
auto it = stringIdMap.find(str->getString());
if (it != stringIdMap.end()) {
return it->second;
}
}
if (!obj->getResultId()) {
obj->setResultId(takeNextId());
}
if (str != nullptr) {
stringIdMap[str->getString()] = obj->getResultId();
}
return obj->getResultId();
}
std::vector<uint32_t> EmitVisitor::Header::takeBinary() {
std::vector<uint32_t> words;
words.push_back(magicNumber);
words.push_back(version);
words.push_back(generator);
words.push_back(bound);
words.push_back(reserved);
return words;
}
uint32_t EmitVisitor::getOrCreateOpStringId(llvm::StringRef str) {
auto it = stringIdMap.find(str);
if (it != stringIdMap.end()) {
return it->second;
}
SpirvString *opString = new (context) SpirvString(/*SourceLocation*/ {}, str);
visit(opString);
spvInstructions.push_back(opString);
return getOrAssignResultId<SpirvInstruction>(opString);
}
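// NonSemantic.Shader.DebugInfo.100 requires every literal operand to be
// passed as the id of a 32-bit integer constant, whereas OpenCL.DebugInfo.100
// accepts raw literals.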
uint32_t EmitVisitor::getLiteralEncodedForDebugInfo(uint32_t val) {
if (spvOptions.debugInfoVulkan) {
return typeHandler.getOrCreateConstantInt(
llvm::APInt(32, val), context.getUIntType(32), /*isSpecConst */ false);
} else {
return val;
}
}
void EmitVisitor::emitDebugNameForInstruction(uint32_t resultId,
llvm::StringRef debugName) {
// Most instructions do not have a debug name associated with them.
if (debugName.empty())
return;
curInst.clear();
curInst.push_back(static_cast<uint32_t>(spv::Op::OpName));
curInst.push_back(resultId);
encodeString(debugName);
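  // A SPIR-V instruction encodes its total word count in the high 16 bits of
  // its first word; the opcode occupies the low 16 bits.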
curInst[0] |= static_cast<uint32_t>(curInst.size()) << 16;
debugVariableBinary.insert(debugVariableBinary.end(), curInst.begin(),
curInst.end());
}
void EmitVisitor::emitDebugLine(spv::Op op, const SourceLocation &loc,
const SourceRange &range,
std::vector<uint32_t> *section,
bool isDebugScope) {
if (!spvOptions.debugInfoLine)
return;
  // Entry function wrappers do not technically exist in HLSL; they are
  // created by DXC, so we do not want to emit line information for their
  // instructions. However, to prevent spirv-opt from removing all debug info,
  // we still emit OpLines marking the beginning and end of the function.
if (inEntryFunctionWrapper &&
(op != spv::Op::OpReturn && op != spv::Op::OpFunction))
return;
  // Per the SPIR-V spec, OpSelectionMerge must immediately precede either an
  // OpBranchConditional or OpSwitch instruction, and OpLoopMerge must
  // immediately precede either an OpBranch or OpBranchConditional
  // instruction. Therefore no OpLine may be inserted between a merge
  // instruction and its successor; skip it and reset the cached position.
if (lastOpWasMergeInst) {
lastOpWasMergeInst = false;
debugLineStart = 0;
debugColumnStart = 0;
debugLineEnd = 0;
debugColumnEnd = 0;
return;
}
if (op == spv::Op::OpSelectionMerge || op == spv::Op::OpLoopMerge)
lastOpWasMergeInst = true;
if (!isOpLineLegalForOp(op))
return;
  // Debug[No]Line is a nonsemantic OpExtInst, so within a block it may only
  // appear after all OpPhi and OpVariable instructions.
if (spvOptions.debugInfoVulkan && !isDebugLineLegalForOp(op))
return;
  // DebugGlobalVariable and DebugLocalVariable in rich DebugInfo already
  // carry line and column information, so we do not emit OpLine for global
  // and local variables. Instead, we emit OpLine for their initialization,
  // if one exists.
if (op == spv::Op::OpVariable)
return;
  // If no SourceLocation is provided, we have to emit OpNoLine to specify
  // that the previous OpLine does not apply to this instruction.
if (loc == SourceLocation()) {
if (!isDebugScope && (debugLineStart != 0 || debugColumnStart != 0)) {
curInst.clear();
if (spvOptions.debugInfoVulkan) {
curInst.push_back(static_cast<uint32_t>(spv::Op::OpExtInst));
curInst.push_back(typeHandler.emitType(context.getVoidType()));
curInst.push_back(takeNextId());
curInst.push_back(debugInfoExtInstId);
curInst.push_back(104u); // DebugNoLine
} else {
curInst.push_back(static_cast<uint32_t>(spv::Op::OpNoLine));
}
curInst[0] |= static_cast<uint32_t>(curInst.size()) << 16;
section->insert(section->end(), curInst.begin(), curInst.end());
}
debugLineStart = 0;
debugColumnStart = 0;
debugLineEnd = 0;
debugColumnEnd = 0;
return;
}
auto fileId = debugMainFileId;
const auto &sm = astContext.getSourceManager();
const char *fileName = sm.getPresumedLoc(loc).getFilename();
if (fileName)
fileId = getOrCreateOpStringId(fileName);
uint32_t lineStart;
uint32_t lineEnd;
uint32_t columnStart;
uint32_t columnEnd;
if (!spvOptions.debugInfoVulkan || range.isInvalid()) {
lineStart = sm.getPresumedLineNumber(loc);
columnStart = sm.getPresumedColumnNumber(loc);
lineEnd = lineStart;
columnEnd = columnStart;
} else {
SourceLocation locStart = range.getBegin();
lineStart = sm.getPresumedLineNumber(locStart);
columnStart = sm.getPresumedColumnNumber(locStart);
SourceLocation locEnd = range.getEnd();
lineEnd = sm.getPresumedLineNumber(locEnd);
columnEnd = sm.getPresumedColumnNumber(locEnd);
}
  // If it is a terminator, just reset the cached line and column, because a
  // terminator ends the effect of the current OpLine.
bool resetLine = (op >= spv::Op::OpBranch && op <= spv::Op::OpUnreachable) ||
op == spv::Op::OpTerminateInvocation;
if (!fileId || !lineStart || !columnStart ||
(lineStart == debugLineStart && columnStart == debugColumnStart &&
lineEnd == debugLineEnd && columnEnd == debugColumnEnd)) {
if (resetLine) {
debugLineStart = 0;
debugColumnStart = 0;
debugLineEnd = 0;
debugColumnEnd = 0;
}
return;
}
assert(section);
if (resetLine) {
debugLineStart = 0;
debugColumnStart = 0;
debugLineEnd = 0;
debugColumnEnd = 0;
} else {
    // Keep the last line and column to avoid emitting duplicate OpLine
    // instructions.
debugLineStart = lineStart;
debugColumnStart = columnStart;
debugLineEnd = lineEnd;
debugColumnEnd = columnEnd;
}
if ((emittedSource[fileId] == 0) && (spvOptions.debugInfoVulkan)) {
SpirvDebugSource *src = new (context) SpirvDebugSource(fileName, "");
visit(src);
spvInstructions.push_back(src);
}
curInst.clear();
if (!spvOptions.debugInfoVulkan) {
curInst.push_back(static_cast<uint32_t>(spv::Op::OpLine));
curInst.push_back(fileId);
curInst.push_back(lineStart);
curInst.push_back(columnStart);
} else {
curInst.push_back(static_cast<uint32_t>(spv::Op::OpExtInst));
curInst.push_back(typeHandler.emitType(context.getVoidType()));
curInst.push_back(takeNextId());
curInst.push_back(debugInfoExtInstId);
curInst.push_back(103u); // DebugLine
curInst.push_back(emittedSource[fileId]);
curInst.push_back(getLiteralEncodedForDebugInfo(lineStart));
curInst.push_back(getLiteralEncodedForDebugInfo(lineEnd));
curInst.push_back(getLiteralEncodedForDebugInfo(columnStart));
curInst.push_back(getLiteralEncodedForDebugInfo(columnEnd));
}
curInst[0] |= static_cast<uint32_t>(curInst.size()) << 16;
section->insert(section->end(), curInst.begin(), curInst.end());
}
void EmitVisitor::initInstruction(SpirvInstruction *inst) {
// Emit the result type if the instruction has a result type.
if (inst->hasResultType()) {
const uint32_t resultTypeId = typeHandler.emitType(inst->getResultType());
inst->setResultTypeId(resultTypeId);
}
// Emit NonUniformEXT decoration (if any).
if (inst->isNonUniform()) {
typeHandler.emitDecoration(getOrAssignResultId<SpirvInstruction>(inst),
spv::Decoration::NonUniformEXT, {});
}
// Emit RelaxedPrecision decoration (if any).
if (inst->isRelaxedPrecision()) {
typeHandler.emitDecoration(getOrAssignResultId<SpirvInstruction>(inst),
spv::Decoration::RelaxedPrecision, {});
}
// Emit NoContraction decoration (if any).
if ((spvOptions.IEEEStrict || inst->isPrecise()) &&
inst->isArithmeticInstruction()) {
typeHandler.emitDecoration(getOrAssignResultId<SpirvInstruction>(inst),
spv::Decoration::NoContraction, {});
}
  // According to Section 2.4 (Logical Layout of a Module) of the SPIR-V spec,
  // OpLine is always emitted to the main binary, except for global variables.
  // Global variables (variables whose storage class is NOT Function) are
  // emitted before the main binary, and they are still allowed to have an
  // OpLine associated with them.
bool isGlobalVar = false;
if (auto *var = dyn_cast<SpirvVariable>(inst))
isGlobalVar = var->getStorageClass() != spv::StorageClass::Function;
const auto op = inst->getopcode();
emitDebugLine(op, inst->getSourceLocation(), inst->getSourceRange(),
isGlobalVar ? &globalVarsBinary : &mainBinary,
isa<SpirvDebugScope>(inst));
// Initialize the current instruction for emitting.
curInst.clear();
curInst.push_back(static_cast<uint32_t>(op));
}
void EmitVisitor::initInstruction(spv::Op op, const SourceLocation &loc) {
emitDebugLine(op, loc, {}, &mainBinary);
curInst.clear();
curInst.push_back(static_cast<uint32_t>(op));
}
void EmitVisitor::finalizeInstruction(std::vector<uint32_t> *section) {
assert(section);
curInst[0] |= static_cast<uint32_t>(curInst.size()) << 16;
section->insert(section->end(), curInst.begin(), curInst.end());
}
std::vector<uint32_t> EmitVisitor::takeBinary() {
std::vector<uint32_t> result;
Header header(takeNextId(), getHeaderVersion(featureManager.getTargetEnv()));
auto headerBinary = header.takeBinary();
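  // Concatenate the sections in the order required by the SPIR-V logical
  // module layout (spec section 2.4).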
result.insert(result.end(), headerBinary.begin(), headerBinary.end());
result.insert(result.end(), preambleBinary.begin(), preambleBinary.end());
result.insert(result.end(), debugFileBinary.begin(), debugFileBinary.end());
result.insert(result.end(), debugVariableBinary.begin(),
debugVariableBinary.end());
result.insert(result.end(), annotationsBinary.begin(),
annotationsBinary.end());
result.insert(result.end(), typeConstantBinary.begin(),
typeConstantBinary.end());
result.insert(result.end(), globalVarsBinary.begin(), globalVarsBinary.end());
result.insert(result.end(), richDebugInfo.begin(), richDebugInfo.end());
result.insert(result.end(), mainBinary.begin(), mainBinary.end());
return result;
}
void EmitVisitor::encodeString(llvm::StringRef value) {
const auto &words = string::encodeSPIRVString(value);
curInst.insert(curInst.end(), words.begin(), words.end());
}
bool EmitVisitor::visit(SpirvModule *, Phase) {
// No pre-visit operations needed for SpirvModule.
return true;
}
bool EmitVisitor::visit(SpirvFunction *fn, Phase phase) {
assert(fn);
// Before emitting the function
if (phase == Visitor::Phase::Init) {
const uint32_t returnTypeId = typeHandler.emitType(fn->getReturnType());
const uint32_t functionTypeId = typeHandler.emitType(fn->getFunctionType());
if (fn->isEntryFunctionWrapper())
inEntryFunctionWrapper = true;
// Emit OpFunction
initInstruction(spv::Op::OpFunction, fn->getSourceLocation());
curInst.push_back(returnTypeId);
curInst.push_back(getOrAssignResultId<SpirvFunction>(fn));
curInst.push_back(
fn->isNoInline()
? static_cast<uint32_t>(spv::FunctionControlMask::DontInline)
: static_cast<uint32_t>(spv::FunctionControlMask::MaskNone));
curInst.push_back(functionTypeId);
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvFunction>(fn),
fn->getFunctionName());
// RelaxedPrecision decoration may be applied to an OpFunction instruction.
if (fn->isRelaxedPrecision())
typeHandler.emitDecoration(getOrAssignResultId<SpirvFunction>(fn),
spv::Decoration::RelaxedPrecision, {});
}
// After emitting the function
else if (phase == Visitor::Phase::Done) {
// Emit OpFunctionEnd
initInstruction(spv::Op::OpFunctionEnd, /* SourceLocation */ {});
finalizeInstruction(&mainBinary);
inEntryFunctionWrapper = false;
}
return true;
}
bool EmitVisitor::visit(SpirvBasicBlock *bb, Phase phase) {
assert(bb);
// Before emitting the basic block.
if (phase == Visitor::Phase::Init) {
// Emit OpLabel
initInstruction(spv::Op::OpLabel, /* SourceLocation */ {});
curInst.push_back(getOrAssignResultId<SpirvBasicBlock>(bb));
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvBasicBlock>(bb),
bb->getName());
}
// After emitting the basic block
else if (phase == Visitor::Phase::Done) {
assert(bb->hasTerminator());
}
return true;
}
bool EmitVisitor::visit(SpirvCapability *cap) {
initInstruction(cap);
curInst.push_back(static_cast<uint32_t>(cap->getCapability()));
finalizeInstruction(&preambleBinary);
return true;
}
bool EmitVisitor::visit(SpirvExtension *ext) {
initInstruction(ext);
encodeString(ext->getExtensionName());
finalizeInstruction(&preambleBinary);
return true;
}
bool EmitVisitor::visit(SpirvExtInstImport *inst) {
initInstruction(inst);
uint32_t resultId = getOrAssignResultId<SpirvInstruction>(inst);
curInst.push_back(resultId);
StringRef setName = inst->getExtendedInstSetName();
encodeString(setName);
finalizeInstruction(&preambleBinary);
  // Remember the id of the debug-info instruction set; it is needed later
  // when emitting DebugLine.
if ((spvOptions.debugInfoVulkan &&
setName.equals("NonSemantic.Shader.DebugInfo.100")) ||
(!spvOptions.debugInfoVulkan && setName.equals("OpenCL.DebugInfo.100")))
debugInfoExtInstId = resultId;
return true;
}
bool EmitVisitor::visit(SpirvMemoryModel *inst) {
initInstruction(inst);
curInst.push_back(static_cast<uint32_t>(inst->getAddressingModel()));
curInst.push_back(static_cast<uint32_t>(inst->getMemoryModel()));
finalizeInstruction(&preambleBinary);
return true;
}
bool EmitVisitor::visit(SpirvEntryPoint *inst) {
initInstruction(inst);
curInst.push_back(static_cast<uint32_t>(inst->getExecModel()));
curInst.push_back(getOrAssignResultId<SpirvFunction>(inst->getEntryPoint()));
encodeString(inst->getEntryPointName());
for (auto *var : inst->getInterface())
curInst.push_back(getOrAssignResultId<SpirvInstruction>(var));
finalizeInstruction(&preambleBinary);
return true;
}
bool EmitVisitor::visit(SpirvExecutionMode *inst) {
initInstruction(inst);
curInst.push_back(getOrAssignResultId<SpirvFunction>(inst->getEntryPoint()));
curInst.push_back(static_cast<uint32_t>(inst->getExecutionMode()));
if (inst->getopcode() == spv::Op::OpExecutionMode) {
curInst.insert(curInst.end(), inst->getParams().begin(),
inst->getParams().end());
} else {
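    // OpExecutionModeId takes its parameters as constant ids rather than raw
    // literals.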
for (uint32_t param : inst->getParams()) {
curInst.push_back(typeHandler.getOrCreateConstantInt(
llvm::APInt(32, param), context.getUIntType(32),
/*isSpecConst */ false));
}
}
finalizeInstruction(&preambleBinary);
return true;
}
bool EmitVisitor::visit(SpirvString *inst) {
auto it = stringIdMap.find(inst->getString());
if (it != stringIdMap.end())
return true;
uint32_t strId = getOrAssignResultId<SpirvInstruction>(inst);
initInstruction(inst);
curInst.push_back(strId);
encodeString(inst->getString());
finalizeInstruction(&debugFileBinary);
stringIdMap[inst->getString()] = strId;
return true;
}
bool EmitVisitor::visit(SpirvSource *inst) {
  // We should emit either OpSource or DebugSource, not both. Therefore, if
  // rich debug info is being generated, skip emitting OpSource.
if (spvOptions.debugInfoRich)
return true;
// Return if we already emitted this OpSource.
uint32_t fileId = getSourceFileId(inst);
if (isSourceWithFileEmitted(fileId))
return true;
setFileOfSourceToDebugSourceId(fileId, kEmittedSourceForOpSource);
if (!debugMainFileId)
debugMainFileId = fileId;
initInstruction(inst);
curInst.push_back(static_cast<uint32_t>(inst->getSourceLanguage()));
curInst.push_back(static_cast<uint32_t>(inst->getVersion()));
if (hlslVersion == 0)
hlslVersion = inst->getVersion();
if (inst->hasFile())
curInst.push_back(fileId);
// Chop up the source into multiple segments if it is too long.
llvm::SmallVector<std::string, 2> choppedSrcCode;
if (spvOptions.debugInfoSource && inst->hasFile()) {
choppedSrcCode = getChoppedSourceCode(inst, spvOptions);
if (!choppedSrcCode.empty()) {
// Note: in order to improve performance and avoid multiple copies, we
// encode this (potentially large) string directly into the
// debugFileBinary.
const auto &words = string::encodeSPIRVString(choppedSrcCode.front());
const auto numWordsInInstr = curInst.size() + words.size();
curInst[0] |= static_cast<uint32_t>(numWordsInInstr) << 16;
debugFileBinary.insert(debugFileBinary.end(), curInst.begin(),
curInst.end());
debugFileBinary.insert(debugFileBinary.end(), words.begin(), words.end());
}
}
if (choppedSrcCode.empty()) {
curInst[0] |= static_cast<uint32_t>(curInst.size()) << 16;
debugFileBinary.insert(debugFileBinary.end(), curInst.begin(),
curInst.end());
return true;
}
  // Now emit OpSourceContinued for the [second:last] snippets.
for (uint32_t i = 1; i < choppedSrcCode.size(); ++i) {
initInstruction(spv::Op::OpSourceContinued, /* SourceLocation */ {});
// Note: in order to improve performance and avoid multiple copies, we
// encode this (potentially large) string directly into the debugFileBinary.
const auto &words = string::encodeSPIRVString(choppedSrcCode[i]);
const auto numWordsInInstr = curInst.size() + words.size();
curInst[0] |= static_cast<uint32_t>(numWordsInInstr) << 16;
debugFileBinary.insert(debugFileBinary.end(), curInst.begin(),
curInst.end());
debugFileBinary.insert(debugFileBinary.end(), words.begin(), words.end());
}
return true;
}
bool EmitVisitor::visit(SpirvModuleProcessed *inst) {
initInstruction(inst);
encodeString(inst->getProcess());
finalizeInstruction(&annotationsBinary);
return true;
}
bool EmitVisitor::visit(SpirvDecoration *inst) {
initInstruction(inst);
if (inst->getTarget()) {
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getTarget()));
} else {
assert(inst->getTargetFunc() != nullptr);
curInst.push_back(
getOrAssignResultId<SpirvFunction>(inst->getTargetFunc()));
}
if (inst->isMemberDecoration())
curInst.push_back(inst->getMemberIndex());
curInst.push_back(static_cast<uint32_t>(inst->getDecoration()));
if (!inst->getParams().empty()) {
curInst.insert(curInst.end(), inst->getParams().begin(),
inst->getParams().end());
}
if (!inst->getIdParams().empty()) {
for (auto *paramInstr : inst->getIdParams())
curInst.push_back(getOrAssignResultId<SpirvInstruction>(paramInstr));
}
finalizeInstruction(&annotationsBinary);
return true;
}
bool EmitVisitor::visit(SpirvVariable *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(static_cast<uint32_t>(inst->getStorageClass()));
if (inst->hasInitializer())
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getInitializer()));
finalizeInstruction(inst->getStorageClass() == spv::StorageClass::Function
? &mainBinary
: &globalVarsBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
if (spvOptions.enableReflect && inst->hasBinding() &&
!inst->getHlslUserType().empty()) {
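    // For reflection, normalize the HLSL user type to lowercase and re-join
    // any template arguments as "name:<arg0,arg1,...>" before decorating the
    // variable with UserTypeGOOGLE.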
std::pair<llvm::StringRef, llvm::StringRef> splitUserType =
inst->getHlslUserType().split('<');
std::string formattedUserType = splitUserType.first.lower();
// Format and append template arguments.
if (!splitUserType.second.empty()) {
llvm::SmallVector<llvm::StringRef, 4> templateParams;
splitUserType.second.split(templateParams, ", ");
if (templateParams.size() > 0) {
formattedUserType += ":<";
formattedUserType += templateParams[0];
for (size_t i = 1; i < templateParams.size(); i++) {
formattedUserType += ",";
formattedUserType += templateParams[i];
}
}
}
typeHandler.emitDecoration(getOrAssignResultId<SpirvInstruction>(inst),
spv::Decoration::UserTypeGOOGLE,
string::encodeSPIRVString(formattedUserType));
}
return true;
}
bool EmitVisitor::visit(SpirvFunctionParameter *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvLoopMerge *inst) {
initInstruction(inst);
curInst.push_back(
getOrAssignResultId<SpirvBasicBlock>(inst->getMergeBlock()));
curInst.push_back(
getOrAssignResultId<SpirvBasicBlock>(inst->getContinueTarget()));
curInst.push_back(static_cast<uint32_t>(inst->getLoopControlMask()));
finalizeInstruction(&mainBinary);
return true;
}
bool EmitVisitor::visit(SpirvSelectionMerge *inst) {
initInstruction(inst);
curInst.push_back(
getOrAssignResultId<SpirvBasicBlock>(inst->getMergeBlock()));
curInst.push_back(static_cast<uint32_t>(inst->getSelectionControlMask()));
finalizeInstruction(&mainBinary);
return true;
}
bool EmitVisitor::visit(SpirvBranch *inst) {
initInstruction(inst);
curInst.push_back(
getOrAssignResultId<SpirvBasicBlock>(inst->getTargetLabel()));
finalizeInstruction(&mainBinary);
return true;
}
bool EmitVisitor::visit(SpirvBranchConditional *inst) {
initInstruction(inst);
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getCondition()));
curInst.push_back(getOrAssignResultId<SpirvBasicBlock>(inst->getTrueLabel()));
curInst.push_back(
getOrAssignResultId<SpirvBasicBlock>(inst->getFalseLabel()));
finalizeInstruction(&mainBinary);
return true;
}
bool EmitVisitor::visit(SpirvKill *inst) {
initInstruction(inst);
finalizeInstruction(&mainBinary);
return true;
}
bool EmitVisitor::visit(SpirvReturn *inst) {
initInstruction(inst);
if (inst->hasReturnValue()) {
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getReturnValue()));
}
finalizeInstruction(&mainBinary);
return true;
}
bool EmitVisitor::visit(SpirvSwitch *inst) {
initInstruction(inst);
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getSelector()));
curInst.push_back(
getOrAssignResultId<SpirvBasicBlock>(inst->getDefaultLabel()));
for (const auto &target : inst->getTargets()) {
typeHandler.emitIntLiteral(target.first, curInst);
curInst.push_back(getOrAssignResultId<SpirvBasicBlock>(target.second));
}
finalizeInstruction(&mainBinary);
return true;
}
bool EmitVisitor::visit(SpirvUnreachable *inst) {
initInstruction(inst);
finalizeInstruction(&mainBinary);
return true;
}
bool EmitVisitor::visit(SpirvAccessChain *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getBase()));
for (const auto index : inst->getIndexes())
curInst.push_back(getOrAssignResultId<SpirvInstruction>(index));
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvAtomic *inst) {
const auto op = inst->getopcode();
initInstruction(inst);
if (op != spv::Op::OpAtomicStore && op != spv::Op::OpAtomicFlagClear) {
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
}
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getPointer()));
curInst.push_back(typeHandler.getOrCreateConstantInt(
llvm::APInt(32, static_cast<uint32_t>(inst->getScope())),
context.getUIntType(32), /*isSpecConst */ false));
curInst.push_back(typeHandler.getOrCreateConstantInt(
llvm::APInt(32, static_cast<uint32_t>(inst->getMemorySemantics())),
context.getUIntType(32), /*isSpecConst */ false));
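  // OpAtomicCompareExchange additionally takes 'unequal' memory semantics,
  // which precede the Value and Comparator operands.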
if (inst->hasComparator())
curInst.push_back(typeHandler.getOrCreateConstantInt(
llvm::APInt(32,
static_cast<uint32_t>(inst->getMemorySemanticsUnequal())),
context.getUIntType(32), /*isSpecConst */ false));
if (inst->hasValue())
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getValue()));
if (inst->hasComparator())
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getComparator()));
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvBarrier *inst) {
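  // OpControlBarrier takes Execution scope, Memory scope, and Semantics;
  // OpMemoryBarrier takes only the latter two, so an execution scope id is
  // created only for control barriers.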
const uint32_t executionScopeId =
inst->isControlBarrier()
? typeHandler.getOrCreateConstantInt(
llvm::APInt(32,
static_cast<uint32_t>(inst->getExecutionScope())),
context.getUIntType(32), /*isSpecConst */ false)
: 0;
const uint32_t memoryScopeId = typeHandler.getOrCreateConstantInt(
llvm::APInt(32, static_cast<uint32_t>(inst->getMemoryScope())),
context.getUIntType(32), /*isSpecConst */ false);
const uint32_t memorySemanticsId = typeHandler.getOrCreateConstantInt(
llvm::APInt(32, static_cast<uint32_t>(inst->getMemorySemantics())),
context.getUIntType(32), /* isSpecConst */ false);
initInstruction(inst);
if (inst->isControlBarrier())
curInst.push_back(executionScopeId);
curInst.push_back(memoryScopeId);
curInst.push_back(memorySemanticsId);
finalizeInstruction(&mainBinary);
return true;
}
bool EmitVisitor::visit(SpirvBinaryOp *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getOperand1()));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getOperand2()));
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvBitFieldExtract *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getBase()));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getOffset()));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getCount()));
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvBitFieldInsert *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getBase()));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getInsert()));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getOffset()));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getCount()));
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvConstantBoolean *inst) {
typeHandler.getOrCreateConstant(inst);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvConstantInteger *inst) {
  // Note: Since array types need 32-bit unsigned constants for the result-id
  // of the array length, the typeHandler keeps track of uint32 constant
  // uniqueness. Therefore emitting uint32 constants is handled by the
  // typeHandler.
typeHandler.getOrCreateConstant(inst);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvConstantFloat *inst) {
typeHandler.getOrCreateConstant(inst);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvConstantComposite *inst) {
typeHandler.getOrCreateConstant(inst);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvConstantNull *inst) {
typeHandler.getOrCreateConstant(inst);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvUndef *inst) {
typeHandler.getOrCreateUndef(inst);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvCompositeConstruct *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
for (const auto constituent : inst->getConstituents())
curInst.push_back(getOrAssignResultId<SpirvInstruction>(constituent));
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvCompositeExtract *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getComposite()));
for (const auto constituent : inst->getIndexes())
curInst.push_back(constituent);
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvCompositeInsert *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getObject()));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getComposite()));
for (const auto constituent : inst->getIndexes())
curInst.push_back(constituent);
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvEmitVertex *inst) {
initInstruction(inst);
finalizeInstruction(&mainBinary);
return true;
}
bool EmitVisitor::visit(SpirvEndPrimitive *inst) {
initInstruction(inst);
finalizeInstruction(&mainBinary);
return true;
}
bool EmitVisitor::visit(SpirvExtInst *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getInstructionSet()));
curInst.push_back(inst->getInstruction());
for (const auto operand : inst->getOperands())
curInst.push_back(getOrAssignResultId<SpirvInstruction>(operand));
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvFunctionCall *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(getOrAssignResultId<SpirvFunction>(inst->getFunction()));
for (const auto arg : inst->getArgs())
curInst.push_back(getOrAssignResultId<SpirvInstruction>(arg));
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvGroupNonUniformOp *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(typeHandler.getOrCreateConstantInt(
llvm::APInt(32, static_cast<uint32_t>(inst->getExecutionScope())),
context.getUIntType(32), /* isSpecConst */ false));
if (inst->hasGroupOp())
curInst.push_back(static_cast<uint32_t>(inst->getGroupOp()));
for (auto *operand : inst->getOperands())
curInst.push_back(getOrAssignResultId<SpirvInstruction>(operand));
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvImageOp *inst) {
initInstruction(inst);
if (!inst->isImageWrite()) {
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
}
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getImage()));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getCoordinate()));
if (inst->isImageWrite())
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getTexelToWrite()));
if (inst->hasDref())
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getDref()));
if (inst->hasComponent())
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getComponent()));
curInst.push_back(static_cast<uint32_t>(inst->getImageOperandsMask()));
if (inst->getImageOperandsMask() != spv::ImageOperandsMask::MaskNone) {
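    // Optional image operands must follow the bit order of the ImageOperands
    // mask: Bias, Lod, Grad, ConstOffset, Offset, ConstOffsets, Sample,
    // MinLod.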
if (inst->hasBias())
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getBias()));
if (inst->hasLod())
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getLod()));
if (inst->hasGrad()) {
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getGradDx()));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getGradDy()));
}
if (inst->hasConstOffset())
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getConstOffset()));
if (inst->hasOffset())
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getOffset()));
if (inst->hasConstOffsets())
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getConstOffsets()));
if (inst->hasSample())
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getSample()));
if (inst->hasMinLod())
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getMinLod()));
}
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvImageQuery *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getImage()));
if (inst->hasCoordinate())
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getCoordinate()));
if (inst->hasLod())
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getLod()));
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvImageSparseTexelsResident *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getResidentCode()));
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvImageTexelPointer *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getImage()));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getCoordinate()));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getSample()));
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvLoad *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getPointer()));
if (inst->hasMemoryAccessSemantics()) {
spv::MemoryAccessMask memoryAccess = inst->getMemoryAccess();
curInst.push_back(static_cast<uint32_t>(memoryAccess));
if (inst->hasAlignment()) {
assert(static_cast<uint32_t>(memoryAccess) &
static_cast<uint32_t>(spv::MemoryAccessMask::Aligned));
curInst.push_back(inst->getAlignment());
}
}
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvCopyObject *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getPointer()));
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvSampledImage *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getImage()));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getSampler()));
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvSelect *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getCondition()));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getTrueObject()));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getFalseObject()));
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvSpecConstantBinaryOp *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(static_cast<uint32_t>(inst->getSpecConstantopcode()));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getOperand1()));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getOperand2()));
finalizeInstruction(&typeConstantBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvSpecConstantUnaryOp *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(static_cast<uint32_t>(inst->getSpecConstantopcode()));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getOperand()));
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvStore *inst) {
initInstruction(inst);
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getPointer()));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getObject()));
if (inst->hasMemoryAccessSemantics()) {
spv::MemoryAccessMask memoryAccess = inst->getMemoryAccess();
curInst.push_back(static_cast<uint32_t>(memoryAccess));
if (inst->hasAlignment()) {
assert(static_cast<uint32_t>(memoryAccess) &
static_cast<uint32_t>(spv::MemoryAccessMask::Aligned));
curInst.push_back(inst->getAlignment());
}
}
finalizeInstruction(&mainBinary);
return true;
}
bool EmitVisitor::visit(SpirvNullaryOp *inst) {
initInstruction(inst);
finalizeInstruction(&mainBinary);
return true;
}
bool EmitVisitor::visit(SpirvUnaryOp *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getOperand()));
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvVectorShuffle *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getVec1()));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getVec2()));
for (const auto component : inst->getComponents())
curInst.push_back(component);
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvArrayLength *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getStructure()));
curInst.push_back(inst->getArrayMember());
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvRayTracingOpNV *inst) {
initInstruction(inst);
if (inst->hasResultType()) {
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
}
for (const auto operand : inst->getOperands())
curInst.push_back(getOrAssignResultId<SpirvInstruction>(operand));
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvDemoteToHelperInvocation *inst) {
initInstruction(inst);
finalizeInstruction(&mainBinary);
return true;
}
bool EmitVisitor::visit(SpirvIsHelperInvocationEXT *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
finalizeInstruction(&mainBinary);
return true;
}
bool EmitVisitor::visit(SpirvDebugInfoNone *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getInstructionSet()));
curInst.push_back(inst->getDebugOpcode());
finalizeInstruction(&richDebugInfo);
return true;
}
void EmitVisitor::generateDebugSource(uint32_t fileId, uint32_t textId,
SpirvDebugSource *inst) {
initInstruction(inst);
curInst.push_back(typeHandler.emitType(context.getVoidType()));
uint32_t resultId = getOrAssignResultId<SpirvInstruction>(inst);
curInst.push_back(resultId);
curInst.push_back(debugInfoExtInstId);
curInst.push_back(inst->getDebugOpcode());
curInst.push_back(fileId);
if (textId)
curInst.push_back(textId);
finalizeInstruction(&richDebugInfo);
emittedSource[fileId] = resultId;
}
void EmitVisitor::generateDebugSourceContinued(uint32_t textId,
SpirvDebugSource *inst) {
initInstruction(spv::Op::OpExtInst, /* SourceLocation */ {});
curInst.push_back(inst->getResultTypeId());
curInst.push_back(takeNextId());
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getInstructionSet()));
curInst.push_back(102u); // DebugSourceContinued
curInst.push_back(textId);
finalizeInstruction(&richDebugInfo);
}
void EmitVisitor::generateChoppedSource(uint32_t fileId,
SpirvDebugSource *inst) {
// Chop up the source into multiple segments if it is too long.
llvm::SmallVector<std::string, 2> choppedSrcCode;
uint32_t textId = 0;
if (spvOptions.debugInfoSource) {
std::string text = inst->getContent();
if (text.empty())
text = ReadSourceCode(inst->getFile(), spvOptions);
if (!text.empty()) {
// Maximum characters for DebugSource and DebugSourceContinued
// OpString literal minus terminating null.
uint32_t maxChar = spvOptions.debugSourceLen * sizeof(uint32_t) - 1;
chopString(text, &choppedSrcCode, maxChar, maxChar);
}
if (!choppedSrcCode.empty())
textId = getOrCreateOpStringId(choppedSrcCode.front());
}
// Generate DebugSource
generateDebugSource(fileId, textId, inst);
// Now emit DebugSourceContinued for the [second:last] snippets.
for (uint32_t i = 1; i < choppedSrcCode.size(); ++i) {
textId = getOrCreateOpStringId(choppedSrcCode[i]);
generateDebugSourceContinued(textId, inst);
}
}
bool EmitVisitor::visit(SpirvDebugSource *inst) {
// Emit the OpString for the file name.
uint32_t fileId = getOrCreateOpStringId(inst->getFile());
if (!debugMainFileId)
debugMainFileId = fileId;
if (emittedSource[fileId] != 0)
return true;
if (spvOptions.debugInfoVulkan) {
generateChoppedSource(fileId, inst);
return true;
}
// OpenCL.DebugInfo.100
// TODO(greg-lunarg): This logic does not currently handle text that is too
// long for a string. In this case, the entire compiler returns without
// producing a SPIR-V file. Once DebugSourceContinued is added to
// OpenCL.DebugInfo.100, the logic below can be removed and the
// NonSemantic.Shader.DebugInfo.100 logic above can be used for both cases.
uint32_t textId = 0;
if (spvOptions.debugInfoSource) {
auto text = ReadSourceCode(inst->getFile(), spvOptions);
if (!text.empty())
textId = getOrCreateOpStringId(text);
}
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
uint32_t resultId = getOrAssignResultId<SpirvInstruction>(inst);
curInst.push_back(resultId);
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getInstructionSet()));
curInst.push_back(inst->getDebugOpcode());
curInst.push_back(fileId);
if (textId)
curInst.push_back(textId);
finalizeInstruction(&richDebugInfo);
emittedSource[fileId] = resultId;
return true;
}
bool EmitVisitor::visit(SpirvDebugCompilationUnit *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getInstructionSet()));
curInst.push_back(inst->getDebugOpcode());
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getSpirvVersion()));
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getDwarfVersion()));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getDebugSource()));
curInst.push_back(getLiteralEncodedForDebugInfo(
static_cast<uint32_t>(inst->getLanguage())));
finalizeInstruction(&richDebugInfo);
return true;
}
bool EmitVisitor::visit(SpirvDebugLexicalBlock *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getInstructionSet()));
curInst.push_back(inst->getDebugOpcode());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getSource()));
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getLine()));
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getColumn()));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getParentScope()));
finalizeInstruction(&richDebugInfo);
return true;
}
bool EmitVisitor::visit(SpirvDebugScope *inst) {
// Technically entry function wrappers do not exist in HLSL. They
// are just created by DXC. We do not want to emit DebugScope for
// it.
if (inEntryFunctionWrapper)
return true;
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getInstructionSet()));
curInst.push_back(inst->getDebugOpcode());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getScope()));
finalizeInstruction(&mainBinary);
return true;
}
bool EmitVisitor::visit(SpirvDebugFunctionDeclaration *inst) {
uint32_t nameId = getOrCreateOpStringId(inst->getDebugName());
uint32_t linkageNameId = getOrCreateOpStringId(inst->getLinkageName());
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getInstructionSet()));
curInst.push_back(inst->getDebugOpcode());
curInst.push_back(nameId);
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getDebugType()));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getSource()));
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getLine()));
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getColumn()));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getParentScope()));
curInst.push_back(linkageNameId);
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getFlags()));
finalizeInstruction(&richDebugInfo);
return true;
}
bool EmitVisitor::visit(SpirvDebugFunction *inst) {
uint32_t nameId = getOrCreateOpStringId(inst->getDebugName());
uint32_t linkageNameId = getOrCreateOpStringId(inst->getLinkageName());
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getInstructionSet()));
curInst.push_back(inst->getDebugOpcode());
curInst.push_back(nameId);
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getDebugType()));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getSource()));
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getLine()));
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getColumn()));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getParentScope()));
curInst.push_back(linkageNameId);
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getFlags()));
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getScopeLine()));
  /// Only emit the function id for OpenCL debug info; Vulkan debug info
  /// disallows forward references.
if (!spvOptions.debugInfoVulkan) {
auto *fn = inst->getSpirvFunction();
if (fn) {
curInst.push_back(getOrAssignResultId<SpirvFunction>(fn));
} else {
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getDebugInfoNone()));
}
}
finalizeInstruction(&richDebugInfo);
return true;
}
bool EmitVisitor::visit(SpirvDebugFunctionDefinition *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getInstructionSet()));
curInst.push_back(inst->getDebugOpcode());
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getDebugFunction()));
curInst.push_back(getOrAssignResultId<SpirvFunction>(inst->getFunction()));
finalizeInstruction(&mainBinary);
return true;
}
bool EmitVisitor::visit(SpirvDebugEntryPoint *inst) {
uint32_t sigId = getOrCreateOpStringId(inst->getSignature());
uint32_t argId = getOrCreateOpStringId(inst->getArgs());
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getInstructionSet()));
curInst.push_back(inst->getDebugOpcode());
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getEntryPoint()));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getCompilationUnit()));
curInst.push_back(sigId);
curInst.push_back(argId);
finalizeInstruction(&mainBinary);
return true;
}
bool EmitVisitor::visit(SpirvDebugTypeBasic *inst) {
uint32_t typeNameId = getOrCreateOpStringId(inst->getDebugName());
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getInstructionSet()));
curInst.push_back(inst->getDebugOpcode());
curInst.push_back(typeNameId);
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getSize()));
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getEncoding()));
  // Vulkan debug info needs a Flags operand. TODO(greg-lunarg): Set the flag
  // correctly.
if (spvOptions.debugInfoVulkan)
curInst.push_back(getLiteralEncodedForDebugInfo(0));
finalizeInstruction(&richDebugInfo);
return true;
}
bool EmitVisitor::visit(SpirvDebugTypeVector *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getInstructionSet()));
curInst.push_back(inst->getDebugOpcode());
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getElementType()));
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getElementCount()));
finalizeInstruction(&richDebugInfo);
return true;
}
bool EmitVisitor::visit(SpirvDebugTypeMatrix *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getInstructionSet()));
curInst.push_back(inst->getDebugOpcode());
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getVectorType()));
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getVectorCount()));
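  // The trailing literal is the Column Major flag; emit true (1).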
curInst.push_back(getLiteralEncodedForDebugInfo(1));
finalizeInstruction(&richDebugInfo);
return true;
}
bool EmitVisitor::visit(SpirvDebugTypeArray *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getInstructionSet()));
curInst.push_back(inst->getDebugOpcode());
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getElementType()));
  // The dimensions are stored in reverse order, so iterate backwards to emit
  // the component counts in the expected order.
for (auto it = inst->getElementCount().rbegin();
it != inst->getElementCount().rend(); ++it) {
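    // Each component count is emitted as the <id> of a 32-bit unsigned
    // integer constant.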
const auto countId = typeHandler.getOrCreateConstantInt(
llvm::APInt(32, *it), context.getUIntType(32),
/* isSpecConst */ false);
curInst.push_back(countId);
}
finalizeInstruction(&richDebugInfo);
return true;
}
bool EmitVisitor::visit(SpirvDebugTypeFunction *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getInstructionSet()));
curInst.push_back(inst->getDebugOpcode());
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getDebugFlags()));
if (inst->getReturnType()) {
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getReturnType()));
} else {
    // If the return type is void, emit the id of OpTypeVoid as the return
    // type operand.
curInst.push_back(typeHandler.emitType(context.getVoidType()));
}
for (auto *paramType : inst->getParamTypes()) {
curInst.push_back(getOrAssignResultId<SpirvInstruction>(paramType));
}
finalizeInstruction(&richDebugInfo);
return true;
}
bool EmitVisitor::visit(SpirvDebugTypeComposite *inst) {
uint32_t typeNameId = getOrCreateOpStringId(inst->getDebugName());
uint32_t linkageNameId = getOrCreateOpStringId(inst->getLinkageName());
const auto size = typeHandler.getOrCreateConstantInt(
llvm::APInt(32, inst->getSizeInBits()), context.getUIntType(32),
/* isSpecConst */ false);
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getInstructionSet()));
curInst.push_back(inst->getDebugOpcode());
curInst.push_back(typeNameId);
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getTag()));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getSource()));
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getLine()));
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getColumn()));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getParentScope()));
curInst.push_back(linkageNameId);
if (inst->getDebugInfoNone()) {
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getDebugInfoNone()));
} else {
curInst.push_back(size);
}
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getDebugFlags()));
for (auto *member : inst->getMembers()) {
curInst.push_back(getOrAssignResultId<SpirvInstruction>(member));
}
finalizeInstruction(&richDebugInfo);
return true;
}
bool EmitVisitor::visit(SpirvDebugTypeMember *inst) {
uint32_t typeNameId = getOrCreateOpStringId(inst->getDebugName());
const auto offset = typeHandler.getOrCreateConstantInt(
llvm::APInt(32, inst->getOffsetInBits()), context.getUIntType(32),
/* isSpecConst */ false);
const auto size = typeHandler.getOrCreateConstantInt(
llvm::APInt(32, inst->getSizeInBits()), context.getUIntType(32),
/* isSpecConst */ false);
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getInstructionSet()));
curInst.push_back(inst->getDebugOpcode());
curInst.push_back(typeNameId);
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getDebugType()));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getSource()));
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getLine()));
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getColumn()));
  /// Only emit the parent reference for OpenCL debug info. Vulkan debug info
  /// breaks the reference cycle between DebugTypeComposite and
  /// DebugTypeMember: only the composite references its members, not the
  /// reverse.
if (!spvOptions.debugInfoVulkan) {
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getParentScope()));
}
curInst.push_back(offset);
curInst.push_back(size);
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getDebugFlags()));
finalizeInstruction(&richDebugInfo);
return true;
}
bool EmitVisitor::visit(SpirvDebugTypeTemplate *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getInstructionSet()));
curInst.push_back(inst->getDebugOpcode());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getTarget()));
for (auto *param : inst->getParams()) {
curInst.push_back(getOrAssignResultId<SpirvInstruction>(param));
}
finalizeInstruction(&richDebugInfo);
return true;
}
bool EmitVisitor::visit(SpirvDebugTypeTemplateParameter *inst) {
uint32_t typeNameId = getOrCreateOpStringId(inst->getDebugName());
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getInstructionSet()));
curInst.push_back(inst->getDebugOpcode());
curInst.push_back(typeNameId);
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getActualType()));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getValue()));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getSource()));
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getLine()));
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getColumn()));
finalizeInstruction(&richDebugInfo);
return true;
}
bool EmitVisitor::visit(SpirvDebugLocalVariable *inst) {
uint32_t nameId = getOrCreateOpStringId(inst->getDebugName());
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getInstructionSet()));
curInst.push_back(inst->getDebugOpcode());
curInst.push_back(nameId);
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getDebugType()));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getSource()));
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getLine()));
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getColumn()));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getParentScope()));
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getFlags()));
if (inst->getArgNumber().hasValue()) {
curInst.push_back(
getLiteralEncodedForDebugInfo(inst->getArgNumber().getValue()));
}
finalizeInstruction(&richDebugInfo);
return true;
}
bool EmitVisitor::visit(SpirvDebugDeclare *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getInstructionSet()));
curInst.push_back(inst->getDebugOpcode());
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getDebugLocalVariable()));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getVariable()));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getDebugExpression()));
finalizeInstruction(&mainBinary);
return true;
}
bool EmitVisitor::visit(SpirvDebugGlobalVariable *inst) {
uint32_t nameId = getOrCreateOpStringId(inst->getDebugName());
uint32_t linkageNameId = getOrCreateOpStringId(inst->getLinkageName());
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getInstructionSet()));
curInst.push_back(inst->getDebugOpcode());
curInst.push_back(nameId);
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getDebugType()));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getSource()));
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getLine()));
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getColumn()));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getParentScope()));
curInst.push_back(linkageNameId);
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getVariable()));
curInst.push_back(getLiteralEncodedForDebugInfo(inst->getFlags()));
if (inst->getStaticMemberDebugDecl().hasValue())
curInst.push_back(getOrAssignResultId<SpirvInstruction>(
inst->getStaticMemberDebugDecl().getValue()));
finalizeInstruction(&richDebugInfo);
return true;
}
bool EmitVisitor::visit(SpirvDebugExpression *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getInstructionSet()));
curInst.push_back(inst->getDebugOpcode());
for (const auto &op : inst->getOperations())
curInst.push_back(getOrAssignResultId<SpirvInstruction>(op));
finalizeInstruction(&richDebugInfo);
return true;
}
bool EmitVisitor::visit(SpirvRayQueryOpKHR *inst) {
initInstruction(inst);
if (inst->hasResultType()) {
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
}
for (const auto operand : inst->getOperands())
curInst.push_back(getOrAssignResultId<SpirvInstruction>(operand));
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvReadClock *inst) {
initInstruction(inst);
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst->getScope()));
finalizeInstruction(&mainBinary);
emitDebugNameForInstruction(getOrAssignResultId<SpirvInstruction>(inst),
inst->getDebugName());
return true;
}
bool EmitVisitor::visit(SpirvRayTracingTerminateOpKHR *inst) {
initInstruction(inst);
finalizeInstruction(&mainBinary);
return true;
}
bool EmitVisitor::visit(SpirvIntrinsicInstruction *inst) {
initInstruction(inst);
if (inst->hasResultType()) {
curInst.push_back(inst->getResultTypeId());
curInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
}
if (inst->getInstructionSet()) {
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getInstructionSet()));
curInst.push_back(inst->getInstruction());
}
for (const auto operand : inst->getOperands()) {
auto literalOperand = dyn_cast<SpirvConstant>(operand);
if (literalOperand && literalOperand->isLiteral()) {
typeHandler.emitLiteral(literalOperand, curInst);
} else {
curInst.push_back(getOrAssignResultId<SpirvInstruction>(operand));
}
}
finalizeInstruction(&mainBinary);
return true;
}
bool EmitVisitor::visit(SpirvEmitMeshTasksEXT *inst) {
initInstruction(inst);
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getXDimension()));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getYDimension()));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getZDimension()));
if (inst->getPayload() != nullptr) {
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getPayload()));
}
finalizeInstruction(&mainBinary);
return true;
}
bool EmitVisitor::visit(SpirvSetMeshOutputsEXT *inst) {
initInstruction(inst);
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getVertexCount()));
curInst.push_back(
getOrAssignResultId<SpirvInstruction>(inst->getPrimitiveCount()));
finalizeInstruction(&mainBinary);
return true;
}
// EmitTypeHandler ------
void EmitTypeHandler::initTypeInstruction(spv::Op op) {
curTypeInst.clear();
curTypeInst.push_back(static_cast<uint32_t>(op));
}
void EmitTypeHandler::finalizeTypeInstruction() {
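  // Patch the word count into the upper 16 bits of the instruction's first
  // word, per the SPIR-V binary layout (low 16 bits hold the opcode).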
curTypeInst[0] |= static_cast<uint32_t>(curTypeInst.size()) << 16;
typeConstantBinary->insert(typeConstantBinary->end(), curTypeInst.begin(),
curTypeInst.end());
}
uint32_t EmitTypeHandler::getResultIdForType(const SpirvType *type,
bool *alreadyExists) {
assert(alreadyExists);
auto foundType = emittedTypes.find(type);
if (foundType != emittedTypes.end()) {
*alreadyExists = true;
return foundType->second;
}
*alreadyExists = false;
const uint32_t id = takeNextIdFunction();
emittedTypes[type] = id;
return id;
}
uint32_t EmitTypeHandler::getOrCreateConstant(SpirvConstant *inst) {
if (auto *constInt = dyn_cast<SpirvConstantInteger>(inst)) {
return getOrCreateConstantInt(constInt->getValue(),
constInt->getResultType(),
inst->isSpecConstant(), inst);
} else if (auto *constFloat = dyn_cast<SpirvConstantFloat>(inst)) {
return getOrCreateConstantFloat(constFloat);
} else if (auto *constComposite = dyn_cast<SpirvConstantComposite>(inst)) {
return getOrCreateConstantComposite(constComposite);
} else if (auto *constNull = dyn_cast<SpirvConstantNull>(inst)) {
return getOrCreateConstantNull(constNull);
} else if (auto *constBool = dyn_cast<SpirvConstantBoolean>(inst)) {
return getOrCreateConstantBool(constBool);
} else if (auto *constUndef = dyn_cast<SpirvUndef>(inst)) {
return getOrCreateUndef(constUndef);
}
llvm_unreachable("cannot emit unknown constant type");
}
uint32_t EmitTypeHandler::getOrCreateConstantBool(SpirvConstantBoolean *inst) {
const auto index = static_cast<uint32_t>(inst->getValue());
const bool isSpecConst = inst->isSpecConstant();
  // Spec constants with identical values must remain distinct instructions,
  // so we should not deduplicate them by value.
if (!isSpecConst && emittedConstantBools[index]) {
// Already emitted this constant value. Reuse.
inst->setResultId(emittedConstantBools[index]->getResultId());
} else if (isSpecConst && emittedSpecConstantInstructions.find(inst) !=
emittedSpecConstantInstructions.end()) {
// We've already emitted this SpecConstant. Reuse.
return inst->getResultId();
} else {
// Constant wasn't emitted in the past.
const uint32_t typeId = emitType(inst->getResultType());
initTypeInstruction(inst->getopcode());
curTypeInst.push_back(typeId);
curTypeInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
finalizeTypeInstruction();
// Remember this constant for the future (if not a spec constant)
if (isSpecConst) {
emittedSpecConstantInstructions.insert(inst);
} else {
emittedConstantBools[index] = inst;
}
}
return inst->getResultId();
}
uint32_t EmitTypeHandler::getOrCreateConstantNull(SpirvConstantNull *inst) {
auto found =
std::find_if(emittedConstantNulls.begin(), emittedConstantNulls.end(),
[inst](SpirvConstantNull *cachedConstant) {
return *cachedConstant == *inst;
});
if (found != emittedConstantNulls.end()) {
// We have already emitted this constant. Reuse.
inst->setResultId((*found)->getResultId());
} else {
// Constant wasn't emitted in the past.
const uint32_t typeId = emitType(inst->getResultType());
initTypeInstruction(spv::Op::OpConstantNull);
curTypeInst.push_back(typeId);
curTypeInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
finalizeTypeInstruction();
// Remember this constant for the future
emittedConstantNulls.push_back(inst);
}
return inst->getResultId();
}
uint32_t EmitTypeHandler::getOrCreateUndef(SpirvUndef *inst) {
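  // Undef values are deduplicated by their canonical AST result type.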
auto canonicalType = inst->getAstResultType().getCanonicalType();
auto found = std::find_if(
emittedUndef.begin(), emittedUndef.end(),
[canonicalType](SpirvUndef *cached) {
return cached->getAstResultType().getCanonicalType() == canonicalType;
});
if (found != emittedUndef.end()) {
// We have already emitted this constant. Reuse.
inst->setResultId((*found)->getResultId());
return inst->getResultId();
}
// Constant wasn't emitted in the past.
const uint32_t typeId = emitType(inst->getResultType());
initTypeInstruction(inst->getopcode());
curTypeInst.push_back(typeId);
curTypeInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
finalizeTypeInstruction();
// Remember this constant for the future
emittedUndef.push_back(inst);
return inst->getResultId();
}
uint32_t EmitTypeHandler::getOrCreateConstantFloat(SpirvConstantFloat *inst) {
llvm::APFloat value = inst->getValue();
const SpirvType *type = inst->getResultType();
const bool isSpecConst = inst->isSpecConstant();
assert(isa<FloatType>(type));
const auto *floatType = dyn_cast<FloatType>(type);
const auto typeBitwidth = floatType->getBitwidth();
const auto valueBitwidth = llvm::APFloat::getSizeInBits(value.getSemantics());
auto valueToUse = value;
// If the type and the value have different widths, we need to convert the
// value to the width of the type. Error out if the conversion is lossy.
if (valueBitwidth != typeBitwidth) {
bool losesInfo = false;
const llvm::fltSemantics &targetSemantics =
typeBitwidth == 16 ? llvm::APFloat::IEEEhalf
: typeBitwidth == 32 ? llvm::APFloat::IEEEsingle
: llvm::APFloat::IEEEdouble;
const auto status = valueToUse.convert(
targetSemantics, llvm::APFloat::roundingMode::rmTowardZero, &losesInfo);
if (status != llvm::APFloat::opStatus::opOK &&
status != llvm::APFloat::opStatus::opInexact) {
emitError(
"evaluating float literal %0 at a lower bitwidth loses information",
{})
          // Converting from 16-bit to 32/64-bit won't lose information.
// So only 32/64-bit values can reach here.
<< std::to_string(valueBitwidth == 32 ? valueToUse.convertToFloat()
: valueToUse.convertToDouble());
return 0;
}
}
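  // Key the constant on (bit pattern, type) for deduplication.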
auto valueTypePair = std::pair<uint64_t, const SpirvType *>(
valueToUse.bitcastToAPInt().getZExtValue(), type);
  // Spec constants with identical values must remain distinct instructions,
  // so we should not deduplicate them by value.
if (!isSpecConst) {
// If this constant has already been emitted, return its result-id.
auto foundResultId = emittedConstantFloats.find(valueTypePair);
if (foundResultId != emittedConstantFloats.end()) {
const uint32_t existingConstantResultId = foundResultId->second;
inst->setResultId(existingConstantResultId);
return existingConstantResultId;
}
} else if (emittedSpecConstantInstructions.find(inst) !=
emittedSpecConstantInstructions.end()) {
// We've already emitted this SpecConstant. Reuse.
return inst->getResultId();
}
// Start constructing the instruction
const uint32_t typeId = emitType(type);
initTypeInstruction(inst->getopcode());
curTypeInst.push_back(typeId);
const uint32_t constantResultId = getOrAssignResultId<SpirvInstruction>(inst);
curTypeInst.push_back(constantResultId);
// Start constructing the value word / words
if (typeBitwidth == 16) {
// According to the SPIR-V Spec:
// When the type's bit width is less than 32-bits, the literal's value
// appears in the low-order bits of the word, and the high-order bits must
// be 0 for a floating-point type.
curTypeInst.push_back(
static_cast<uint32_t>(valueToUse.bitcastToAPInt().getZExtValue()));
} else if (typeBitwidth == 32) {
curTypeInst.push_back(
cast::BitwiseCast<uint32_t, float>(valueToUse.convertToFloat()));
} else {
// TODO: The ordering of the 2 words depends on the endian-ness of the
// host machine.
struct wideFloat {
uint32_t word0;
uint32_t word1;
};
wideFloat words =
cast::BitwiseCast<wideFloat, double>(valueToUse.convertToDouble());
curTypeInst.push_back(words.word0);
curTypeInst.push_back(words.word1);
}
finalizeTypeInstruction();
  // Remember this constant for the future (if not a spec constant).
if (isSpecConst) {
emittedSpecConstantInstructions.insert(inst);
} else {
emittedConstantFloats[valueTypePair] = constantResultId;
}
return constantResultId;
}
uint32_t
EmitTypeHandler::getOrCreateConstantInt(llvm::APInt value,
const SpirvType *type, bool isSpecConst,
SpirvInstruction *constantInstruction) {
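  // Constants are keyed on (bit pattern, type) so repeated requests for the
  // same value of the same type reuse a single result-id.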
auto valueTypePair =
std::pair<uint64_t, const SpirvType *>(value.getZExtValue(), type);
  // Spec constants with identical values must remain distinct instructions,
  // so we should not deduplicate them by value.
if (!isSpecConst) {
// If this constant has already been emitted, return its result-id.
auto foundResultId = emittedConstantInts.find(valueTypePair);
if (foundResultId != emittedConstantInts.end()) {
const uint32_t existingConstantResultId = foundResultId->second;
if (constantInstruction)
constantInstruction->setResultId(existingConstantResultId);
return existingConstantResultId;
}
} else if (emittedSpecConstantInstructions.find(constantInstruction) !=
emittedSpecConstantInstructions.end()) {
// We've already emitted this SpecConstant. Reuse.
return constantInstruction->getResultId();
}
assert(isa<IntegerType>(type));
const auto *intType = dyn_cast<IntegerType>(type);
const auto bitwidth = intType->getBitwidth();
const auto isSigned = intType->isSignedInt();
// Start constructing the instruction
const uint32_t typeId = emitType(type);
initTypeInstruction(isSpecConst ? spv::Op::OpSpecConstant
: spv::Op::OpConstant);
curTypeInst.push_back(typeId);
// Assign a result-id if one has not been provided.
uint32_t constantResultId = 0;
if (constantInstruction)
constantResultId =
getOrAssignResultId<SpirvInstruction>(constantInstruction);
else
constantResultId = takeNextIdFunction();
curTypeInst.push_back(constantResultId);
// Start constructing the value word / words
// For 16-bit and 32-bit cases, the value occupies 1 word in the instruction
if (bitwidth == 16 || bitwidth == 32) {
if (isSigned) {
curTypeInst.push_back(static_cast<int32_t>(value.getSExtValue()));
} else {
curTypeInst.push_back(static_cast<uint32_t>(value.getZExtValue()));
}
}
// 64-bit cases
else {
struct wideInt {
uint32_t word0;
uint32_t word1;
};
wideInt words;
if (isSigned) {
words = cast::BitwiseCast<wideInt, int64_t>(value.getSExtValue());
} else {
words = cast::BitwiseCast<wideInt, uint64_t>(value.getZExtValue());
}
curTypeInst.push_back(words.word0);
curTypeInst.push_back(words.word1);
}
finalizeTypeInstruction();
  // Remember this constant for the future.
if (isSpecConst) {
emittedSpecConstantInstructions.insert(constantInstruction);
} else {
emittedConstantInts[valueTypePair] = constantResultId;
}
return constantResultId;
}
uint32_t
EmitTypeHandler::getOrCreateConstantComposite(SpirvConstantComposite *inst) {
// First make sure all constituents have been visited and have a result-id.
for (auto constituent : inst->getConstituents())
getOrCreateConstant(constituent);
// SpecConstant instructions are not unique, so we should not re-use existing
// spec constants.
const bool isSpecConst = inst->isSpecConstant();
SpirvConstantComposite **found = nullptr;
if (!isSpecConst) {
found = std::find_if(
emittedConstantComposites.begin(), emittedConstantComposites.end(),
[inst](SpirvConstantComposite *cachedConstant) {
if (inst->getopcode() != cachedConstant->getopcode())
return false;
auto instConstituents = inst->getConstituents();
auto cachedConstituents = cachedConstant->getConstituents();
if (instConstituents.size() != cachedConstituents.size())
return false;
for (size_t i = 0; i < instConstituents.size(); ++i)
if (instConstituents[i]->getResultId() !=
cachedConstituents[i]->getResultId())
return false;
return true;
});
} else if (emittedSpecConstantInstructions.find(inst) !=
emittedSpecConstantInstructions.end()) {
return inst->getResultId();
}
if (!isSpecConst && found != emittedConstantComposites.end()) {
// We have already emitted this constant. Reuse.
inst->setResultId((*found)->getResultId());
} else if (isSpecConst && emittedSpecConstantInstructions.find(inst) !=
emittedSpecConstantInstructions.end()) {
// We've already emitted this SpecConstant. Reuse.
return inst->getResultId();
} else {
// Constant wasn't emitted in the past.
const uint32_t typeId = emitType(inst->getResultType());
initTypeInstruction(isSpecConst ? spv::Op::OpSpecConstantComposite
: spv::Op::OpConstantComposite);
curTypeInst.push_back(typeId);
curTypeInst.push_back(getOrAssignResultId<SpirvInstruction>(inst));
for (auto constituent : inst->getConstituents())
curTypeInst.push_back(getOrAssignResultId<SpirvInstruction>(constituent));
finalizeTypeInstruction();
// Remember this constant for the future
if (isSpecConst) {
emittedSpecConstantInstructions.insert(inst);
} else {
emittedConstantComposites.push_back(inst);
}
}
return inst->getResultId();
}
static inline bool
isFieldMergeWithPrevious(const StructType::FieldInfo &previous,
const StructType::FieldInfo &field) {
if (previous.fieldIndex == field.fieldIndex) {
// Right now, the only reason for those indices to be shared is if both
// are merged bitfields.
assert(previous.bitfield.hasValue() && field.bitfield.hasValue());
}
return previous.fieldIndex == field.fieldIndex;
}
uint32_t EmitTypeHandler::emitType(const SpirvType *type) {
// First get the decorations that would apply to this type.
bool alreadyExists = false;
const uint32_t id = getResultIdForType(type, &alreadyExists);
// If the type has already been emitted, we just need to return its
// <result-id>.
if (alreadyExists)
return id;
// Emit OpName for the type (if any).
emitNameForType(type->getName(), id);
if (isa<VoidType>(type)) {
initTypeInstruction(spv::Op::OpTypeVoid);
curTypeInst.push_back(id);
finalizeTypeInstruction();
}
// Boolean types
else if (isa<BoolType>(type)) {
initTypeInstruction(spv::Op::OpTypeBool);
curTypeInst.push_back(id);
finalizeTypeInstruction();
}
// Integer types
else if (const auto *intType = dyn_cast<IntegerType>(type)) {
initTypeInstruction(spv::Op::OpTypeInt);
curTypeInst.push_back(id);
curTypeInst.push_back(intType->getBitwidth());
curTypeInst.push_back(intType->isSignedInt() ? 1 : 0);
finalizeTypeInstruction();
}
// Float types
else if (const auto *floatType = dyn_cast<FloatType>(type)) {
initTypeInstruction(spv::Op::OpTypeFloat);
curTypeInst.push_back(id);
curTypeInst.push_back(floatType->getBitwidth());
finalizeTypeInstruction();
}
// Vector types
else if (const auto *vecType = dyn_cast<VectorType>(type)) {
const uint32_t elementTypeId = emitType(vecType->getElementType());
initTypeInstruction(spv::Op::OpTypeVector);
curTypeInst.push_back(id);
curTypeInst.push_back(elementTypeId);
curTypeInst.push_back(vecType->getElementCount());
finalizeTypeInstruction();
}
// Matrix types
else if (const auto *matType = dyn_cast<MatrixType>(type)) {
const uint32_t vecTypeId = emitType(matType->getVecType());
initTypeInstruction(spv::Op::OpTypeMatrix);
curTypeInst.push_back(id);
curTypeInst.push_back(vecTypeId);
curTypeInst.push_back(matType->getVecCount());
finalizeTypeInstruction();
// Note that RowMajor and ColMajor decorations only apply to structure
// members, and should not be handled here.
}
// Image types
else if (const auto *imageType = dyn_cast<ImageType>(type)) {
const uint32_t sampledTypeId = emitType(imageType->getSampledType());
initTypeInstruction(spv::Op::OpTypeImage);
curTypeInst.push_back(id);
curTypeInst.push_back(sampledTypeId);
curTypeInst.push_back(static_cast<uint32_t>(imageType->getDimension()));
curTypeInst.push_back(static_cast<uint32_t>(imageType->getDepth()));
curTypeInst.push_back(imageType->isArrayedImage() ? 1 : 0);
curTypeInst.push_back(imageType->isMSImage() ? 1 : 0);
curTypeInst.push_back(static_cast<uint32_t>(imageType->withSampler()));
curTypeInst.push_back(static_cast<uint32_t>(imageType->getImageFormat()));
finalizeTypeInstruction();
}
// Sampler types
else if (isa<SamplerType>(type)) {
initTypeInstruction(spv::Op::OpTypeSampler);
curTypeInst.push_back(id);
finalizeTypeInstruction();
}
// SampledImage types
else if (const auto *sampledImageType = dyn_cast<SampledImageType>(type)) {
const uint32_t imageTypeId = emitType(sampledImageType->getImageType());
initTypeInstruction(spv::Op::OpTypeSampledImage);
curTypeInst.push_back(id);
curTypeInst.push_back(imageTypeId);
finalizeTypeInstruction();
}
// Array types
else if (const auto *arrayType = dyn_cast<ArrayType>(type)) {
// Emit the OpConstant instruction that is needed to get the result-id for
// the array length.
const auto length = getOrCreateConstantInt(
llvm::APInt(32, arrayType->getElementCount()), context.getUIntType(32),
/* isSpecConst */ false);
// Emit the OpTypeArray instruction
const uint32_t elemTypeId = emitType(arrayType->getElementType());
initTypeInstruction(spv::Op::OpTypeArray);
curTypeInst.push_back(id);
curTypeInst.push_back(elemTypeId);
curTypeInst.push_back(length);
finalizeTypeInstruction();
auto stride = arrayType->getStride();
if (stride.hasValue())
emitDecoration(id, spv::Decoration::ArrayStride, {stride.getValue()});
}
// RuntimeArray types
else if (const auto *raType = dyn_cast<RuntimeArrayType>(type)) {
const uint32_t elemTypeId = emitType(raType->getElementType());
initTypeInstruction(spv::Op::OpTypeRuntimeArray);
curTypeInst.push_back(id);
curTypeInst.push_back(elemTypeId);
finalizeTypeInstruction();
auto stride = raType->getStride();
if (stride.hasValue())
emitDecoration(id, spv::Decoration::ArrayStride, {stride.getValue()});
}
// Structure types
else if (const auto *structType = dyn_cast<StructType>(type)) {
std::vector<std::reference_wrapper<const StructType::FieldInfo>>
fieldsToGenerate;
{
llvm::ArrayRef<StructType::FieldInfo> fields = structType->getFields();
for (size_t i = 0; i < fields.size(); ++i) {
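        // Skip fields that were merged into the previous field; merged
        // bitfields share a single field index.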
if (i > 0 && isFieldMergeWithPrevious(fields[i - 1], fields[i]))
continue;
fieldsToGenerate.push_back(std::ref(fields[i]));
}
}
// Emit OpMemberName for the struct members.
for (size_t i = 0; i < fieldsToGenerate.size(); ++i)
emitNameForType(fieldsToGenerate[i].get().name, id, i);
llvm::SmallVector<uint32_t, 4> fieldTypeIds;
for (auto &field : fieldsToGenerate)
fieldTypeIds.push_back(emitType(field.get().type));
for (size_t i = 0; i < fieldsToGenerate.size(); ++i) {
const auto &field = fieldsToGenerate[i].get();
// Offset decorations
if (field.offset.hasValue()) {
emitDecoration(id, spv::Decoration::Offset, {field.offset.getValue()},
i);
}
// MatrixStride decorations
if (field.matrixStride.hasValue())
emitDecoration(id, spv::Decoration::MatrixStride,
{field.matrixStride.getValue()}, i);
// RowMajor/ColMajor decorations
if (field.isRowMajor.hasValue())
emitDecoration(id,
field.isRowMajor.getValue() ? spv::Decoration::RowMajor
: spv::Decoration::ColMajor,
{}, i);
// RelaxedPrecision decorations
if (field.isRelaxedPrecision)
emitDecoration(id, spv::Decoration::RelaxedPrecision, {}, i);
// NonWritable decorations
if (structType->isReadOnly())
emitDecoration(id, spv::Decoration::NonWritable, {}, i);
}
// Emit Block or BufferBlock decorations if necessary.
auto interfaceType = structType->getInterfaceType();
if (interfaceType == StructInterfaceType::StorageBuffer)
      // The BufferBlock decoration is only available up to SPIR-V 1.3; from
      // version 1.4 on, storage buffers use the Block decoration instead.
emitDecoration(id,
featureManager.isTargetEnvSpirv1p4OrAbove()
? spv::Decoration::Block
: spv::Decoration::BufferBlock,
{});
else if (interfaceType == StructInterfaceType::UniformBuffer)
emitDecoration(id, spv::Decoration::Block, {});
initTypeInstruction(spv::Op::OpTypeStruct);
curTypeInst.push_back(id);
for (auto fieldTypeId : fieldTypeIds)
curTypeInst.push_back(fieldTypeId);
finalizeTypeInstruction();
}
// Pointer types
else if (const auto *ptrType = dyn_cast<SpirvPointerType>(type)) {
const uint32_t pointeeType = emitType(ptrType->getPointeeType());
initTypeInstruction(spv::Op::OpTypePointer);
curTypeInst.push_back(id);
curTypeInst.push_back(static_cast<uint32_t>(ptrType->getStorageClass()));
curTypeInst.push_back(pointeeType);
finalizeTypeInstruction();
}
// Function types
else if (const auto *fnType = dyn_cast<FunctionType>(type)) {
const uint32_t retTypeId = emitType(fnType->getReturnType());
llvm::SmallVector<uint32_t, 4> paramTypeIds;
for (auto *paramType : fnType->getParamTypes())
paramTypeIds.push_back(emitType(paramType));
initTypeInstruction(spv::Op::OpTypeFunction);
curTypeInst.push_back(id);
curTypeInst.push_back(retTypeId);
for (auto paramTypeId : paramTypeIds)
curTypeInst.push_back(paramTypeId);
finalizeTypeInstruction();
}
// Acceleration Structure NV type
else if (isa<AccelerationStructureTypeNV>(type)) {
initTypeInstruction(spv::Op::OpTypeAccelerationStructureNV);
curTypeInst.push_back(id);
finalizeTypeInstruction();
}
// RayQueryType KHR type
else if (isa<RayQueryTypeKHR>(type)) {
initTypeInstruction(spv::Op::OpTypeRayQueryKHR);
curTypeInst.push_back(id);
finalizeTypeInstruction();
} else if (const auto *spvIntrinsicType =
dyn_cast<SpirvIntrinsicType>(type)) {
initTypeInstruction(static_cast<spv::Op>(spvIntrinsicType->getOpCode()));
curTypeInst.push_back(id);
for (const SpvIntrinsicTypeOperand &operand :
spvIntrinsicType->getOperands()) {
if (operand.isTypeOperand) {
        // Calling emitType recursively may replace the contents of
        // curTypeInst, so save them and restore them after the call.
std::vector<uint32_t> outerTypeInst = curTypeInst;
outerTypeInst.push_back(emitType(operand.operand_as_type));
curTypeInst = outerTypeInst;
} else {
auto *literal = dyn_cast<SpirvConstant>(operand.operand_as_inst);
if (literal && literal->isLiteral()) {
emitLiteral(literal, curTypeInst);
} else {
curTypeInst.push_back(getOrAssignResultId(operand.operand_as_inst));
}
}
}
finalizeTypeInstruction();
}
// Hybrid Types
// Note: The type lowering pass should lower all types to SpirvTypes.
// Therefore, if we find a hybrid type when going through the emitting pass,
// that is clearly a bug.
else if (isa<HybridType>(type)) {
llvm_unreachable("found hybrid type when emitting SPIR-V");
}
// Unhandled types
else {
llvm_unreachable("unhandled type in emitType");
}
return id;
}
template <typename vecType>
void EmitTypeHandler::emitIntLiteral(const SpirvConstantInteger *intLiteral,
vecType &outInst) {
const auto &literalVal = intLiteral->getValue();
emitIntLiteral(literalVal, outInst);
}
template <typename vecType>
void EmitTypeHandler::emitIntLiteral(const llvm::APInt &literalVal,
vecType &outInst) {
bool positive = !literalVal.isNegative();
if (literalVal.getBitWidth() <= 32) {
outInst.push_back(positive ? literalVal.getZExtValue()
: literalVal.getSExtValue());
} else {
assert(literalVal.getBitWidth() == 64);
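    // SPIR-V wide literals are emitted low-order word first.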
uint64_t val =
positive ? literalVal.getZExtValue() : literalVal.getSExtValue();
outInst.push_back(static_cast<unsigned>(val));
outInst.push_back(static_cast<unsigned>(val >> 32));
}
}
template <typename vecType>
void EmitTypeHandler::emitFloatLiteral(const SpirvConstantFloat *fLiteral,
vecType &outInst) {
const auto &literalVal = fLiteral->getValue();
const auto bitwidth = llvm::APFloat::getSizeInBits(literalVal.getSemantics());
if (bitwidth <= 32) {
outInst.push_back(literalVal.bitcastToAPInt().getZExtValue());
} else {
assert(bitwidth == 64);
uint64_t val = literalVal.bitcastToAPInt().getZExtValue();
outInst.push_back(static_cast<unsigned>(val));
outInst.push_back(static_cast<unsigned>(val >> 32));
}
}
template <typename VecType>
void EmitTypeHandler::emitLiteral(const SpirvConstant *literal,
VecType &outInst) {
if (auto boolLiteral = dyn_cast<SpirvConstantBoolean>(literal)) {
outInst.push_back(static_cast<unsigned>(boolLiteral->getValue()));
} else if (auto intLiteral = dyn_cast<SpirvConstantInteger>(literal)) {
emitIntLiteral(intLiteral, outInst);
} else if (auto fLiteral = dyn_cast<SpirvConstantFloat>(literal)) {
emitFloatLiteral(fLiteral, outInst);
}
}
void EmitTypeHandler::emitDecoration(uint32_t typeResultId,
spv::Decoration decoration,
llvm::ArrayRef<uint32_t> decorationParams,
llvm::Optional<uint32_t> memberIndex) {
spv::Op op =
memberIndex.hasValue() ? spv::Op::OpMemberDecorate : spv::Op::OpDecorate;
if (decoration == spv::Decoration::UserTypeGOOGLE) {
op = memberIndex.hasValue() ? spv::Op::OpMemberDecorateString
: spv::Op::OpDecorateString;
}
assert(curDecorationInst.empty());
curDecorationInst.push_back(static_cast<uint32_t>(op));
curDecorationInst.push_back(typeResultId);
if (memberIndex.hasValue())
curDecorationInst.push_back(memberIndex.getValue());
curDecorationInst.push_back(static_cast<uint32_t>(decoration));
for (auto param : decorationParams)
curDecorationInst.push_back(param);
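  // Patch the word count into the upper 16 bits of the first word.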
curDecorationInst[0] |= static_cast<uint32_t>(curDecorationInst.size()) << 16;
// Add to the full annotations list
annotationsBinary->insert(annotationsBinary->end(), curDecorationInst.begin(),
curDecorationInst.end());
curDecorationInst.clear();
}
void EmitTypeHandler::emitNameForType(llvm::StringRef name,
uint32_t targetTypeId,
llvm::Optional<uint32_t> memberIndex) {
if (name.empty())
return;
std::vector<uint32_t> nameInstr;
auto op = memberIndex.hasValue() ? spv::Op::OpMemberName : spv::Op::OpName;
nameInstr.push_back(static_cast<uint32_t>(op));
nameInstr.push_back(targetTypeId);
if (memberIndex.hasValue())
nameInstr.push_back(memberIndex.getValue());
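  // SPIR-V literal strings are nul-terminated UTF-8 packed into 32-bit words.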
const auto &words = string::encodeSPIRVString(name);
nameInstr.insert(nameInstr.end(), words.begin(), words.end());
nameInstr[0] |= static_cast<uint32_t>(nameInstr.size()) << 16;
debugVariableBinary->insert(debugVariableBinary->end(), nameInstr.begin(),
nameInstr.end());
}
} // end namespace spirv
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/RelaxedPrecisionVisitor.h | //===--- RelaxedPrecisionVisitor.h - RelaxedPrecision Visitor ----*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_SPIRV_RELAXEDPRECISIONVISITOR_H
#define LLVM_CLANG_LIB_SPIRV_RELAXEDPRECISIONVISITOR_H
#include "clang/SPIRV/SpirvContext.h"
#include "clang/SPIRV/SpirvVisitor.h"
namespace clang {
namespace spirv {
class RelaxedPrecisionVisitor : public Visitor {
public:
RelaxedPrecisionVisitor(SpirvContext &spvCtx, const SpirvCodeGenOptions &opts)
: Visitor(opts, spvCtx) {}
bool visit(SpirvFunction *, Phase) override;
bool visit(SpirvVariable *) override;
bool visit(SpirvFunctionParameter *) override;
bool visit(SpirvAccessChain *) override;
bool visit(SpirvAtomic *) override;
bool visit(SpirvBitFieldExtract *) override;
bool visit(SpirvBitFieldInsert *) override;
bool visit(SpirvConstantBoolean *) override;
bool visit(SpirvConstantInteger *) override;
bool visit(SpirvConstantFloat *) override;
bool visit(SpirvConstantComposite *) override;
bool visit(SpirvCompositeConstruct *) override;
bool visit(SpirvCompositeExtract *) override;
bool visit(SpirvCompositeInsert *) override;
bool visit(SpirvExtInst *) override;
bool visit(SpirvFunctionCall *) override;
bool visit(SpirvLoad *) override;
bool visit(SpirvSelect *) override;
bool visit(SpirvStore *) override;
bool visit(SpirvSpecConstantBinaryOp *) override;
bool visit(SpirvSpecConstantUnaryOp *) override;
bool visit(SpirvBinaryOp *) override;
bool visit(SpirvUnaryOp *) override;
bool visit(SpirvVectorShuffle *) override;
bool visit(SpirvImageOp *) override;
using Visitor::visit;
/// The "sink" visit function for all instructions.
///
  /// By default, all other visit functions redirect to this visit function,
  /// so override it if you want to handle all instructions regardless of
  /// their concrete type.
bool visitInstruction(SpirvInstruction *instr) override { return true; }
};
} // end namespace spirv
} // end namespace clang
#endif // LLVM_CLANG_LIB_SPIRV_RELAXEDPRECISIONVISITOR_H
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/GlPerVertex.cpp | //===--- GlPerVertex.cpp - GlPerVertex implementation ------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "GlPerVertex.h"
#include <algorithm>
#include "clang/AST/Attr.h"
#include "clang/AST/HlslTypes.h"
#include "clang/SPIRV/AstTypeProbe.h"
namespace clang {
namespace spirv {
namespace {
constexpr uint32_t gClipDistanceIndex = 0;
constexpr uint32_t gCullDistanceIndex = 1;
/// \brief Returns true if the given decl has a semantic string attached and
/// writes the info to *semanticStr, *semantic, and *semanticIndex.
// TODO: duplication! Same as the one in DeclResultIdMapper.cpp
bool getStageVarSemantic(const NamedDecl *decl, llvm::StringRef *semanticStr,
const hlsl::Semantic **semantic,
uint32_t *semanticIndex) {
for (auto *annotation : decl->getUnusualAnnotations()) {
if (auto *sema = dyn_cast<hlsl::SemanticDecl>(annotation)) {
*semanticStr = sema->SemanticName;
llvm::StringRef semanticName;
hlsl::Semantic::DecomposeNameAndIndex(*semanticStr, &semanticName,
semanticIndex);
*semantic = hlsl::Semantic::GetByName(semanticName);
return true;
}
}
return false;
}
/// Returns the type of the given decl. If the given decl is a FunctionDecl,
/// returns its result type.
inline QualType getTypeOrFnRetType(const DeclaratorDecl *decl) {
if (const auto *funcDecl = dyn_cast<FunctionDecl>(decl)) {
return funcDecl->getReturnType();
}
return decl->getType();
}
/// Returns true if the given declaration has a primitive type qualifier.
/// Returns false otherwise.
inline bool hasGSPrimitiveTypeQualifier(const NamedDecl *decl) {
return decl->hasAttr<HLSLTriangleAttr>() ||
decl->hasAttr<HLSLTriangleAdjAttr>() ||
decl->hasAttr<HLSLPointAttr>() || decl->hasAttr<HLSLLineAttr>() ||
decl->hasAttr<HLSLLineAdjAttr>();
}
} // anonymous namespace
GlPerVertex::GlPerVertex(ASTContext &context, SpirvContext &spirvContext,
SpirvBuilder &spirvBuilder)
: astContext(context), spvContext(spirvContext), spvBuilder(spirvBuilder),
inClipVar(nullptr), inCullVar(nullptr), outClipVar(nullptr),
outCullVar(nullptr), inClipPrecise(false), outClipPrecise(false),
inCullPrecise(false), outCullPrecise(false), inArraySize(0),
outArraySize(0), inClipArraySize(1), outClipArraySize(1),
inCullArraySize(1), outCullArraySize(1), inSemanticStrs(2, ""),
outSemanticStrs(2, "") {}
void GlPerVertex::generateVars(uint32_t inArrayLen, uint32_t outArrayLen) {
inArraySize = inArrayLen;
outArraySize = outArrayLen;
if (!inClipType.empty())
inClipVar = createClipCullDistanceVar(/*asInput=*/true, /*isClip=*/true,
inClipArraySize, inClipPrecise);
if (!inCullType.empty())
inCullVar = createClipCullDistanceVar(/*asInput=*/true, /*isClip=*/false,
inCullArraySize, inCullPrecise);
if (!outClipType.empty())
outClipVar = createClipCullDistanceVar(/*asInput=*/false, /*isClip=*/true,
outClipArraySize, outClipPrecise);
if (!outCullType.empty())
outCullVar = createClipCullDistanceVar(/*asInput=*/false, /*isClip=*/false,
outCullArraySize, outCullPrecise);
}
llvm::SmallVector<SpirvVariable *, 2> GlPerVertex::getStageInVars() const {
llvm::SmallVector<SpirvVariable *, 2> vars;
if (inClipVar)
vars.push_back(inClipVar);
if (inCullVar)
vars.push_back(inCullVar);
return vars;
}
llvm::SmallVector<SpirvVariable *, 2> GlPerVertex::getStageOutVars() const {
llvm::SmallVector<SpirvVariable *, 2> vars;
if (outClipVar)
vars.push_back(outClipVar);
if (outCullVar)
vars.push_back(outCullVar);
return vars;
}
bool GlPerVertex::recordGlPerVertexDeclFacts(const DeclaratorDecl *decl,
bool asInput) {
const QualType type = getTypeOrFnRetType(decl);
if (type->isVoidType())
return true;
// Indices or payload mesh shader param objects don't contain any
// builtin variables or semantic strings. So early return.
if (decl->hasAttr<HLSLIndicesAttr>() || decl->hasAttr<HLSLPayloadAttr>()) {
return true;
}
return doGlPerVertexFacts(decl, type, asInput);
}
bool GlPerVertex::containOnlyFloatType(QualType type) const {
QualType elemType;
if (isScalarType(type, &elemType)) {
if (const auto *builtinType = elemType->getAs<BuiltinType>())
return builtinType->getKind() == BuiltinType::Float;
return false;
} else if (isVectorType(type, &elemType, nullptr)) {
return containOnlyFloatType(elemType);
} else if (const auto *arrayType = astContext.getAsConstantArrayType(type)) {
return containOnlyFloatType(arrayType->getElementType());
}
return false;
}
uint32_t GlPerVertex::getNumberOfScalarComponentsInScalarVectorArray(
QualType type) const {
uint32_t count = 0;
if (isScalarType(type)) {
return 1;
} else if (isVectorType(type, nullptr, &count)) {
return count;
} else if (type->isConstantArrayType()) {
const auto *arrayType = astContext.getAsConstantArrayType(type);
count = static_cast<uint32_t>(arrayType->getSize().getZExtValue());
return count * getNumberOfScalarComponentsInScalarVectorArray(
arrayType->getElementType());
}
return 0;
}
SpirvInstruction *GlPerVertex::createScalarClipCullDistanceLoad(
SpirvInstruction *ptr, QualType asType, uint32_t offset, SourceLocation loc,
llvm::Optional<uint32_t> arrayIndex) const {
if (!isScalarType(asType))
return nullptr;
  // ClipDistance/CullDistance is always a float array. We access it through
  // pointers, which must therefore be of pointer-to-float type.
const QualType f32Type = astContext.FloatTy;
llvm::SmallVector<SpirvInstruction *, 2> spirvConstants;
if (arrayIndex.hasValue()) {
spirvConstants.push_back(spvBuilder.getConstantInt(
astContext.UnsignedIntTy, llvm::APInt(32, arrayIndex.getValue())));
}
spirvConstants.push_back(spvBuilder.getConstantInt(astContext.UnsignedIntTy,
llvm::APInt(32, offset)));
return spvBuilder.createLoad(
f32Type, spvBuilder.createAccessChain(f32Type, ptr, spirvConstants, loc),
loc);
}
SpirvInstruction *GlPerVertex::createScalarOrVectorClipCullDistanceLoad(
SpirvInstruction *ptr, QualType asType, uint32_t offset, SourceLocation loc,
llvm::Optional<uint32_t> arrayIndex) const {
if (isScalarType(asType))
return createScalarClipCullDistanceLoad(ptr, asType, offset, loc,
arrayIndex);
QualType elemType = {};
uint32_t count = 0;
if (!isVectorType(asType, &elemType, &count))
return nullptr;
  // The target SV_ClipDistance/SV_CullDistance variable is of vector type,
  // so we need to construct a vector out of the float array elements.
llvm::SmallVector<SpirvInstruction *, 4> elements;
for (uint32_t i = 0; i < count; ++i) {
elements.push_back(createScalarClipCullDistanceLoad(
ptr, elemType, offset + i, loc, arrayIndex));
}
return spvBuilder.createCompositeConstruct(
astContext.getExtVectorType(astContext.FloatTy, count), elements, loc);
}
SpirvInstruction *GlPerVertex::createClipCullDistanceLoad(
SpirvInstruction *ptr, QualType asType, uint32_t offset, SourceLocation loc,
llvm::Optional<uint32_t> arrayIndex) const {
if (asType->isConstantArrayType()) {
const auto *arrayType = astContext.getAsConstantArrayType(asType);
uint32_t count = static_cast<uint32_t>(arrayType->getSize().getZExtValue());
QualType elemType = arrayType->getElementType();
uint32_t numberOfScalarsInElement =
getNumberOfScalarComponentsInScalarVectorArray(elemType);
if (numberOfScalarsInElement == 0)
return nullptr;
llvm::SmallVector<SpirvInstruction *, 4> elements;
for (uint32_t i = 0; i < count; ++i) {
elements.push_back(createScalarOrVectorClipCullDistanceLoad(
ptr, elemType, offset + i * numberOfScalarsInElement, loc,
arrayIndex));
}
return spvBuilder.createCompositeConstruct(asType, elements, loc);
}
return createScalarOrVectorClipCullDistanceLoad(ptr, asType, offset, loc,
arrayIndex);
}
bool GlPerVertex::createScalarClipCullDistanceStore(
SpirvInstruction *ptr, SpirvInstruction *value, QualType valueType,
SpirvInstruction *offset, SourceLocation loc,
llvm::ArrayRef<uint32_t> valueIndices,
llvm::Optional<SpirvInstruction *> arrayIndex) const {
if (!isScalarType(valueType))
return false;
llvm::SmallVector<SpirvInstruction *, 2> ptrIndices;
if (arrayIndex.hasValue()) {
ptrIndices.push_back(arrayIndex.getValue());
}
ptrIndices.push_back(offset);
ptr = spvBuilder.createAccessChain(astContext.FloatTy, ptr, ptrIndices, loc);
if (!valueIndices.empty()) {
value = spvBuilder.createCompositeExtract(astContext.FloatTy, value,
valueIndices, loc);
}
spvBuilder.createStore(ptr, value, loc);
return true;
}
bool GlPerVertex::createScalarOrVectorClipCullDistanceStore(
SpirvInstruction *ptr, SpirvInstruction *value, QualType valueType,
SpirvInstruction *offset, SourceLocation loc,
llvm::Optional<uint32_t> valueOffset,
llvm::Optional<SpirvInstruction *> arrayIndex) const {
llvm::SmallVector<uint32_t, 2> valueIndices;
if (valueOffset.hasValue())
valueIndices.push_back(valueOffset.getValue());
if (isScalarType(valueType)) {
return createScalarClipCullDistanceStore(ptr, value, valueType, offset, loc,
valueIndices, arrayIndex);
}
QualType elemType = {};
uint32_t count = 0;
if (!isVectorType(valueType, &elemType, &count))
return false;
  // The target SV_ClipDistance/SV_CullDistance variable is of vector type,
  // so we store each vector element into the corresponding float array
  // element.
for (uint32_t i = 0; i < count; ++i) {
auto *constant =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, i));
auto *elemOffset = spvBuilder.createBinaryOp(
spv::Op::OpIAdd, astContext.UnsignedIntTy, offset, constant, loc);
valueIndices.push_back(i);
createScalarClipCullDistanceStore(ptr, value, elemType, elemOffset, loc,
valueIndices, arrayIndex);
valueIndices.pop_back();
}
return true;
}
bool GlPerVertex::createClipCullDistanceStore(
SpirvInstruction *ptr, SpirvInstruction *value, QualType valueType,
SpirvInstruction *offset, SourceLocation loc,
llvm::Optional<SpirvInstruction *> arrayIndex) const {
if (valueType->isConstantArrayType()) {
const auto *arrayType = astContext.getAsConstantArrayType(valueType);
uint32_t count = static_cast<uint32_t>(arrayType->getSize().getZExtValue());
QualType elemType = arrayType->getElementType();
uint32_t numberOfScalarsInElement =
getNumberOfScalarComponentsInScalarVectorArray(elemType);
if (numberOfScalarsInElement == 0)
return false;
for (uint32_t i = 0; i < count; ++i) {
auto *constant = spvBuilder.getConstantInt(
astContext.UnsignedIntTy,
llvm::APInt(32, i * numberOfScalarsInElement));
auto *elemOffset = spvBuilder.createBinaryOp(
spv::Op::OpIAdd, astContext.UnsignedIntTy, offset, constant, loc);
createScalarOrVectorClipCullDistanceStore(
ptr, value, elemType, elemOffset, loc, llvm::Optional<uint32_t>(i),
arrayIndex);
}
return true;
}
return createScalarOrVectorClipCullDistanceStore(
ptr, value, valueType, offset, loc, llvm::None, arrayIndex);
}
bool GlPerVertex::setClipCullDistanceType(SemanticIndexToTypeMap *typeMap,
uint32_t semanticIndex,
QualType clipCullDistanceType) const {
if (getNumberOfScalarComponentsInScalarVectorArray(clipCullDistanceType) ==
0) {
return false;
}
(*typeMap)[semanticIndex] = clipCullDistanceType;
return true;
}
bool GlPerVertex::doGlPerVertexFacts(const NamedDecl *decl, QualType baseType,
bool asInput) {
llvm::StringRef semanticStr;
const hlsl::Semantic *semantic = {};
uint32_t semanticIndex = {};
bool isPrecise = decl->hasAttr<HLSLPreciseAttr>();
if (!getStageVarSemantic(decl, &semanticStr, &semantic, &semanticIndex)) {
if (baseType->isStructureType()) {
const auto *recordType = baseType->getAs<RecordType>();
const auto *recordDecl = recordType->getAsCXXRecordDecl();
// Go through each field to see if there is any usage of
// SV_ClipDistance/SV_CullDistance.
for (const auto *field : recordDecl->fields()) {
if (!doGlPerVertexFacts(field, field->getType(), asInput))
return false;
}
// We should also recursively go through each inherited class.
for (const auto &base : recordDecl->bases()) {
const auto *baseDecl = base.getType()->getAsCXXRecordDecl();
if (!doGlPerVertexFacts(baseDecl, base.getType(), asInput))
return false;
}
return true;
}
// For these HS/DS/GS specific data types, semantic strings are attached
// to the underlying struct's fields.
if (hlsl::IsHLSLInputPatchType(baseType)) {
return doGlPerVertexFacts(
decl, hlsl::GetHLSLInputPatchElementType(baseType), asInput);
}
if (hlsl::IsHLSLOutputPatchType(baseType) ||
hlsl::IsHLSLStreamOutputType(baseType)) {
return doGlPerVertexFacts(
decl, hlsl::GetHLSLOutputPatchElementType(baseType), asInput);
}
if (hasGSPrimitiveTypeQualifier(decl) ||
decl->hasAttr<HLSLVerticesAttr>() ||
decl->hasAttr<HLSLPrimitivesAttr>()) {
      // GS inputs and MS output attributes have an additional level of
      // arrayness that we should remove to check the underlying type.
baseType = astContext.getAsConstantArrayType(baseType)->getElementType();
return doGlPerVertexFacts(decl, baseType, asInput);
}
emitError("semantic string missing for shader %select{output|input}0 "
"variable '%1'",
decl->getLocation())
<< asInput << decl->getName();
return false;
}
// Semantic string is attached to this decl directly
// Select the corresponding data member to update
SemanticIndexToTypeMap *typeMap = nullptr;
uint32_t *blockArraySize = asInput ? &inArraySize : &outArraySize;
bool isCull = false;
auto *semanticStrs = asInput ? &inSemanticStrs : &outSemanticStrs;
uint32_t index = kSemanticStrCount;
switch (semantic->GetKind()) {
case hlsl::Semantic::Kind::ClipDistance:
typeMap = asInput ? &inClipType : &outClipType;
index = gClipDistanceIndex;
break;
case hlsl::Semantic::Kind::CullDistance:
typeMap = asInput ? &inCullType : &outCullType;
isCull = true;
index = gCullDistanceIndex;
break;
default:
// Only Cull or Clip apply.
break;
}
if (isCull) {
if (asInput)
inCullPrecise = isPrecise;
else
outCullPrecise = isPrecise;
} else {
if (asInput)
inClipPrecise = isPrecise;
else
outClipPrecise = isPrecise;
}
// Remember the semantic strings provided by the developer so that we can
// emit OpDecorate* instructions properly for them
if (index < kSemanticStrCount) {
if ((*semanticStrs)[index].empty())
(*semanticStrs)[index] = semanticStr;
// We can have multiple ClipDistance/CullDistance semantics mapping to the
// same variable. For those cases, it is not appropriate to use any one of
// them as the semantic. Use the standard one without index.
else if (index == gClipDistanceIndex)
(*semanticStrs)[index] = "SV_ClipDistance";
else if (index == gCullDistanceIndex)
(*semanticStrs)[index] = "SV_CullDistance";
}
if (index > gCullDistanceIndex) {
// Annotated with something other than SV_ClipDistance or SV_CullDistance.
// We don't care about such cases.
return true;
}
  // Parameters marked as inout have reference type.
if (baseType->isReferenceType())
baseType = baseType->getPointeeType();
  // Clip/cull distance must be made up only of floats.
if (!containOnlyFloatType(baseType)) {
emitError("elements for %select{SV_ClipDistance|SV_CullDistance}0 "
"variable '%1' must be scalar, vector, or array with float type",
decl->getLocStart())
<< isCull << decl->getName();
return false;
}
if (baseType->isConstantArrayType()) {
const auto *arrayType = astContext.getAsConstantArrayType(baseType);
// TODO: handle extra large array size?
if (*blockArraySize ==
static_cast<uint32_t>(arrayType->getSize().getZExtValue())) {
if (setClipCullDistanceType(typeMap, semanticIndex,
arrayType->getElementType())) {
return true;
}
emitError("elements for %select{SV_ClipDistance|SV_CullDistance}0 "
"variable '%1' must be scalar, vector, or array with float "
"type",
decl->getLocStart())
<< isCull << decl->getName();
return false;
}
}
if (setClipCullDistanceType(typeMap, semanticIndex, baseType)) {
return true;
}
emitError("type for %select{SV_ClipDistance|SV_CullDistance}0 "
"variable '%1' must be a scalar, vector, or array with float type",
decl->getLocStart())
<< isCull << decl->getName();
return false;
}
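// A hypothetical layout example (declarations assumed, not from the source)
// for the calculation below: given
//   float  a : SV_ClipDistance0;
//   float3 b : SV_ClipDistance1;
// the semantic indices sort to {0, 1}, so 'a' gets offset 0, 'b' gets offset
// 1, and the total ClipDistance float array size becomes 1 + 3 = 4.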
void GlPerVertex::calculateClipCullDistanceArraySize() {
// Updates the offset map and array size for the given input/output
// SV_ClipDistance/SV_CullDistance.
const auto updateSizeAndOffset =
[this](const SemanticIndexToTypeMap &typeMap,
SemanticIndexToArrayOffsetMap *offsetMap, uint32_t *totalSize) {
        // If no usage of SV_ClipDistance/SV_CullDistance was recorded, just
        // return. This will keep the size defaulted to 1.
if (typeMap.empty())
return;
*totalSize = 0;
// Collect all indices and sort them
llvm::SmallVector<uint32_t, 8> indices;
for (const auto &kv : typeMap)
indices.push_back(kv.first);
std::sort(indices.begin(), indices.end(), std::less<uint32_t>());
for (uint32_t index : indices) {
const auto type = typeMap.find(index)->second;
uint32_t count = getNumberOfScalarComponentsInScalarVectorArray(type);
if (count == 0) {
llvm_unreachable("SV_ClipDistance/SV_CullDistance has unexpected "
"type or size");
}
(*offsetMap)[index] = *totalSize;
*totalSize += count;
}
};
updateSizeAndOffset(inClipType, &inClipOffset, &inClipArraySize);
updateSizeAndOffset(inCullType, &inCullOffset, &inCullArraySize);
updateSizeAndOffset(outClipType, &outClipOffset, &outClipArraySize);
updateSizeAndOffset(outCullType, &outCullOffset, &outCullArraySize);
}
SpirvVariable *GlPerVertex::createClipCullDistanceVar(bool asInput, bool isClip,
uint32_t arraySize,
bool isPrecise) {
QualType type = astContext.getConstantArrayType(astContext.FloatTy,
llvm::APInt(32, arraySize),
clang::ArrayType::Normal, 0);
if (asInput && inArraySize != 0) {
type = astContext.getConstantArrayType(type, llvm::APInt(32, inArraySize),
clang::ArrayType::Normal, 0);
} else if (!asInput && outArraySize != 0) {
type = astContext.getConstantArrayType(type, llvm::APInt(32, outArraySize),
clang::ArrayType::Normal, 0);
}
spv::StorageClass sc =
asInput ? spv::StorageClass::Input : spv::StorageClass::Output;
SpirvVariable *var = spvBuilder.addStageBuiltinVar(
type, sc,
isClip ? spv::BuiltIn::ClipDistance : spv::BuiltIn::CullDistance,
isPrecise, /*SourceLocation*/ {});
const auto index = isClip ? gClipDistanceIndex : gCullDistanceIndex;
spvBuilder.decorateHlslSemantic(var, asInput ? inSemanticStrs[index]
: outSemanticStrs[index]);
return var;
}
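// Illustrative example (assumed values): for a GS input with inArraySize == 3
// and arraySize == 4, the type built above is an array of 3 elements, each of
// type float[4], and the variable is decorated as the ClipDistance builtin.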
bool GlPerVertex::tryToAccess(hlsl::SigPoint::Kind sigPointKind,
hlsl::Semantic::Kind semanticKind,
uint32_t semanticIndex,
llvm::Optional<SpirvInstruction *> invocationId,
SpirvInstruction **value, bool noWriteBack,
SpirvInstruction *vecComponent,
SourceLocation loc, SourceRange range) {
assert(value);
  // invocationId should only be used for HSCPOut or MSOut.
assert(invocationId.hasValue()
? (sigPointKind == hlsl::SigPoint::Kind::HSCPOut ||
sigPointKind == hlsl::SigPoint::Kind::MSOut)
: true);
switch (semanticKind) {
case hlsl::Semantic::Kind::ClipDistance:
case hlsl::Semantic::Kind::CullDistance:
// gl_PerVertex only cares about these builtins.
break;
default:
return false; // Fall back to the normal path
}
switch (sigPointKind) {
case hlsl::SigPoint::Kind::PSIn:
case hlsl::SigPoint::Kind::HSCPIn:
case hlsl::SigPoint::Kind::DSCPIn:
case hlsl::SigPoint::Kind::GSVIn:
return readField(semanticKind, semanticIndex, value, loc, range);
case hlsl::SigPoint::Kind::GSOut:
case hlsl::SigPoint::Kind::VSOut:
case hlsl::SigPoint::Kind::HSCPOut:
case hlsl::SigPoint::Kind::DSOut:
case hlsl::SigPoint::Kind::MSOut:
if (noWriteBack)
return true;
return writeField(semanticKind, semanticIndex, invocationId, value,
vecComponent, loc, range);
default:
// Only interfaces that involve gl_PerVertex are needed.
break;
}
return false;
}
SpirvInstruction *
GlPerVertex::readClipCullArrayAsType(bool isClip, uint32_t offset,
QualType asType, SourceLocation loc,
SourceRange range) const {
SpirvVariable *clipCullVar = isClip ? inClipVar : inCullVar;
uint32_t count = getNumberOfScalarComponentsInScalarVectorArray(asType);
if (count == 0) {
llvm_unreachable("SV_ClipDistance/SV_CullDistance has unexpected type "
"or size");
}
if (inArraySize == 0) {
return createClipCullDistanceLoad(clipCullVar, asType, offset, loc);
}
// The input builtin block is an array of block, which means we need to
// return an array of ClipDistance/CullDistance values from an array of
// struct.
llvm::SmallVector<SpirvInstruction *, 8> arrayElements;
QualType arrayType = {};
for (uint32_t i = 0; i < inArraySize; ++i) {
arrayElements.push_back(createClipCullDistanceLoad(
clipCullVar, asType, offset, loc, llvm::Optional<uint32_t>(i)));
}
arrayType = astContext.getConstantArrayType(
asType, llvm::APInt(32, inArraySize), clang::ArrayType::Normal, 0);
return spvBuilder.createCompositeConstruct(arrayType, arrayElements, loc);
}
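// Illustrative example (assumed values): for a GS reading a float2
// SV_ClipDistance with 3 input vertices (inArraySize == 3), the loop above
// loads one float2 per vertex and composes the results into a float2[3].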
bool GlPerVertex::readField(hlsl::Semantic::Kind semanticKind,
uint32_t semanticIndex, SpirvInstruction **value,
SourceLocation loc, SourceRange range) {
assert(value);
switch (semanticKind) {
case hlsl::Semantic::Kind::ClipDistance: {
const auto offsetIter = inClipOffset.find(semanticIndex);
const auto typeIter = inClipType.find(semanticIndex);
// We should have recorded all these semantics before.
assert(offsetIter != inClipOffset.end());
assert(typeIter != inClipType.end());
*value = readClipCullArrayAsType(/*isClip=*/true, offsetIter->second,
typeIter->second, loc, range);
return true;
}
case hlsl::Semantic::Kind::CullDistance: {
const auto offsetIter = inCullOffset.find(semanticIndex);
const auto typeIter = inCullType.find(semanticIndex);
// We should have recorded all these semantics before.
assert(offsetIter != inCullOffset.end());
assert(typeIter != inCullType.end());
*value = readClipCullArrayAsType(/*isClip=*/false, offsetIter->second,
typeIter->second, loc, range);
return true;
}
default:
// Only Cull or Clip apply.
break;
}
return false;
}
void GlPerVertex::writeClipCullArrayFromType(
llvm::Optional<SpirvInstruction *> invocationId, bool isClip,
SpirvInstruction *offset, QualType fromType, SpirvInstruction *fromValue,
SourceLocation loc, SourceRange range) const {
auto *clipCullVar = isClip ? outClipVar : outCullVar;
if (outArraySize == 0) {
// The output builtin does not have extra arrayness. Only need one index
// to locate the array segment for this SV_ClipDistance/SV_CullDistance
// variable: the start offset within the float array.
if (createClipCullDistanceStore(clipCullVar, fromValue, fromType, offset,
loc)) {
return;
}
llvm_unreachable("SV_ClipDistance/SV_CullDistance has unexpected type "
"or size");
return;
}
// Writing to an array only happens in HSCPOut or MSOut.
if (!spvContext.isHS() && !spvContext.isMS()) {
llvm_unreachable("Writing to clip/cull distance in hull/mesh shader is "
"not allowed");
}
// And we are only writing to the array element with InvocationId as index.
assert(invocationId.hasValue());
// The output builtin block is an array of block, which means we need to
// write an array of ClipDistance/CullDistance values into an array of
// struct.
SpirvInstruction *arrayIndex = invocationId.getValue();
if (createClipCullDistanceStore(
clipCullVar, fromValue, fromType, offset, loc,
llvm::Optional<SpirvInstruction *>(arrayIndex))) {
return;
}
llvm_unreachable("SV_ClipDistance/SV_CullDistance has unexpected type or "
"size");
}
bool GlPerVertex::writeField(hlsl::Semantic::Kind semanticKind,
uint32_t semanticIndex,
llvm::Optional<SpirvInstruction *> invocationId,
SpirvInstruction **value,
SpirvInstruction *vecComponent, SourceLocation loc,
SourceRange range) {
// Similar to the writing logic in DeclResultIdMapper::createStageVars():
//
// Unlike reading, which may require us to read stand-alone builtins and
// stage input variables and compose an array of structs out of them,
// it happens that we don't need to write an array of structs in a bunch
// for all shader stages:
//
// * VS: output is a single struct, without extra arrayness
// * HS: output is an array of structs, with extra arrayness,
// but we only write to the struct at the InvocationID index
// * DS: output is a single struct, without extra arrayness
  // * GS: output is controlled by OpEmitVertex, one vertex at a time
// * MS: output is an array of structs, with extra arrayness
//
// The interesting shader stage is HS. We need the InvocationID to write
// out the value to the correct array element.
SpirvInstruction *offset = nullptr;
QualType type = {};
bool isClip = false;
switch (semanticKind) {
case hlsl::Semantic::Kind::ClipDistance: {
const auto offsetIter = outClipOffset.find(semanticIndex);
const auto typeIter = outClipType.find(semanticIndex);
// We should have recorded all these semantics before.
assert(offsetIter != outClipOffset.end());
assert(typeIter != outClipType.end());
offset = spvBuilder.getConstantInt(astContext.UnsignedIntTy,
llvm::APInt(32, offsetIter->second));
type = typeIter->second;
isClip = true;
break;
}
case hlsl::Semantic::Kind::CullDistance: {
const auto offsetIter = outCullOffset.find(semanticIndex);
const auto typeIter = outCullType.find(semanticIndex);
// We should have recorded all these semantics before.
assert(offsetIter != outCullOffset.end());
assert(typeIter != outCullType.end());
offset = spvBuilder.getConstantInt(astContext.UnsignedIntTy,
llvm::APInt(32, offsetIter->second));
type = typeIter->second;
break;
}
default:
// Only Cull or Clip apply.
return false;
}
if (vecComponent) {
QualType elemType = {};
if (!isVectorType(type, &elemType)) {
assert(false && "expected vector type");
}
type = elemType;
offset =
spvBuilder.createBinaryOp(spv::Op::OpIAdd, astContext.UnsignedIntTy,
vecComponent, offset, loc, range);
}
writeClipCullArrayFromType(invocationId, isClip, offset, type, *value, loc,
range);
return true;
}
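// Illustrative example (assumed values): writing the .y channel of a float2
// mapped to SV_ClipDistance0 with recorded base offset 0 passes vecComponent
// == 1, so the effective offset becomes 0 + 1 and a single float is stored.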
} // end namespace spirv
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/PervertexInputVisitor.h | //===--- PervertexInputVisitor.h ---- PerVertex Input Visitor ----------------//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_SPIRV_PERVERTEXINPUTVISITOR_H
#define LLVM_CLANG_LIB_SPIRV_PERVERTEXINPUTVISITOR_H
#include "clang/AST/ASTContext.h"
#include "clang/SPIRV/SpirvBuilder.h"
#include "clang/SPIRV/SpirvContext.h"
#include "clang/SPIRV/SpirvModule.h"
#include "clang/SPIRV/SpirvVisitor.h"
namespace clang {
namespace spirv {
class PervertexInputVisitor : public Visitor {
public:
PervertexInputVisitor(SpirvBuilder &spvBuilder, ASTContext &astCtx,
SpirvContext &spvCtx, const SpirvCodeGenOptions &opts)
: Visitor(opts, spvCtx), inEntryFunctionWrapper(false),
spirvBuilder(spvBuilder), astContext(astCtx), currentMod(nullptr),
currentFunc(nullptr) {}
///< Don't add extra index to a simple vector/matrix elem access when base is
///< not expanded.
bool isNotExpandedVectorAccess(QualType baseType, QualType resultType);
///< Expand nointerpolation decorated variables/parameters.
///< If a variable/parameter is passed from a decorated inputs, it should be
///< treated as nointerpolated too.
bool expandNointerpVarAndParam(SpirvInstruction *spvInst);
bool expandNointerpStructure(QualType qtype, bool isVarDecoratedInterp);
///< Add temp function variables, for operand replacement. An original usage
///< to a nointerpolated variable/parameter should be treated as an access to
///< its first element after expanding (data at first provoking vertex).
SpirvInstruction *createFirstPerVertexVar(SpirvInstruction *base,
llvm::StringRef varName);
SpirvVariable *addFunctionTempVar(llvm::StringRef name, QualType valueType,
SourceLocation loc, bool isPrecise);
SpirvInstruction *createProvokingVertexAccessChain(SpirvInstruction *base,
uint32_t index,
QualType resultType);
///< Get mapped operand used to replace original operand, if not exists,
///< return itself.
SpirvInstruction *getMappedReplaceInstr(SpirvInstruction *i);
///< For expanded variables, we need to decide where to add an extra index
///< zero for SpirvAccessChain and SpirvCompositeExtract. This comes to
///< three access cases : 1. array element. 2. structure member 3. vector
///< channel.
int appendIndexZeroAt(QualType base, llvm::ArrayRef<uint32_t> index);
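  // Illustrative example (hypothetical access): for an expanded per-vertex
  // input accessed as s.member.x, the extra zero is appended after the member
  // index, turning {memberIdx, channelIdx} into {memberIdx, 0, channelIdx} so
  // that data from the first vertex is read.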
///< When use temp variables within a function, we need to add load/store ops.
///< TIP: A nointerpolated input or function parameter will be treated as
///< input.vtx0 within current function, but would be treated as an array will
///< pass to a function call.
SpirvInstruction *createVertexLoad(SpirvInstruction *base);
void createVertexStore(SpirvInstruction *pt, SpirvInstruction *obj);
SpirvInstruction *
createVertexAccessChain(QualType resultType, SpirvInstruction *base,
llvm::ArrayRef<SpirvInstruction *> indexes);
///< Visit different SPIR-V constructs for emitting.
using Visitor::visit;
bool visit(SpirvModule *, Phase phase) override;
bool visit(SpirvFunction *, Phase phase) override;
bool visit(SpirvEntryPoint *) override;
bool visit(SpirvVariable *) override;
bool visit(SpirvFunctionParameter *) override;
bool visit(SpirvAccessChain *) override;
bool visit(SpirvCompositeExtract *) override;
bool visit(SpirvFunctionCall *) override;
#define REMAP_FUNC_OP(CLASS) \
bool visit(Spirv##CLASS *op) override { \
op->replaceOperand( \
[this](SpirvInstruction *inst) { \
return getMappedReplaceInstr(inst); \
}, \
inEntryFunctionWrapper); \
return true; \
}
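  // Illustrative expansion (for reference only): REMAP_FUNC_OP(Load) defines
  // bool visit(SpirvLoad *op) override, whose body rewrites each operand of
  // the load through getMappedReplaceInstr() and returns true.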
REMAP_FUNC_OP(ImageQuery)
REMAP_FUNC_OP(ImageOp)
REMAP_FUNC_OP(ExtInst)
REMAP_FUNC_OP(Atomic)
REMAP_FUNC_OP(BitFieldInsert)
REMAP_FUNC_OP(BitFieldExtract)
REMAP_FUNC_OP(IntrinsicInstruction)
REMAP_FUNC_OP(VectorShuffle)
REMAP_FUNC_OP(CompositeConstruct)
REMAP_FUNC_OP(BinaryOp)
REMAP_FUNC_OP(Store)
REMAP_FUNC_OP(Load)
REMAP_FUNC_OP(UnaryOp)
REMAP_FUNC_OP(CompositeInsert)
REMAP_FUNC_OP(BranchConditional)
REMAP_FUNC_OP(Return)
REMAP_FUNC_OP(ImageTexelPointer)
REMAP_FUNC_OP(Select)
REMAP_FUNC_OP(Switch)
REMAP_FUNC_OP(CopyObject)
REMAP_FUNC_OP(GroupNonUniformOp)
private:
///< Whether in entry function wrapper, which will influence replace steps.
bool inEntryFunctionWrapper;
///< Instruction replacement mapper.
///< For AccessChain and CompositeExtract, will only add extra index.
llvm::DenseMap<SpirvInstruction *, SpirvInstruction *> m_instrReplaceMap;
///< Global declared structure type is special,
///< we won't redeclare/expand it more than once.
llvm::SmallSet<const Type *, 4> m_expandedStructureType;
///< Context related helpers, will use to modify spv instruction stream.
SpirvBuilder &spirvBuilder;
ASTContext &astContext;
SpirvModule *currentMod;
SpirvFunction *currentFunc;
llvm::DenseMap<SpirvFunctionParameter *, std::vector<SpirvInstruction *>>
paramCaller;
/// Emits error to the diagnostic engine associated with this visitor.
template <unsigned N>
DiagnosticBuilder emitError(const char (&message)[N],
SourceLocation srcLoc = {}) {
const auto diagId = astContext.getDiagnostics().getCustomDiagID(
clang::DiagnosticsEngine::Error, message);
return astContext.getDiagnostics().Report(srcLoc, diagId);
}
};
} // end namespace spirv
} // end namespace clang
#endif // LLVM_CLANG_LIB_SPIRV_PERVERTEXINPUTVISITOR_H
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/EmitSpirvAction.cpp | //===--- EmitSpirvAction.cpp - EmitSPIRVAction implementation -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/SPIRV/EmitSpirvAction.h"
#include "SpirvEmitter.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/Frontend/CompilerInstance.h"
#include "llvm/ADT/STLExtras.h"
namespace clang {
std::unique_ptr<ASTConsumer>
EmitSpirvAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
return llvm::make_unique<spirv::SpirvEmitter>(CI);
}
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/BlockReadableOrder.cpp | //===--- BlockReadableOrder.cpp - BlockReadableOrderVisitor impl ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "BlockReadableOrder.h"
namespace clang {
namespace spirv {
void BlockReadableOrderVisitor::visit(SpirvBasicBlock *block) {
if (doneBlocks.count(block) || todoBlocks.count(block))
return;
callback(block);
doneBlocks.insert(block);
// Check the continue and merge targets. If any one of them exists, we need
// to make sure visiting it is delayed until we've done the rest.
SpirvBasicBlock *continueBlock = block->getContinueTarget();
SpirvBasicBlock *mergeBlock = block->getMergeTarget();
if (continueBlock)
todoBlocks.insert(continueBlock);
if (mergeBlock)
todoBlocks.insert(mergeBlock);
for (SpirvBasicBlock *successor : block->getSuccessors())
visit(successor);
// Handle continue and merge targets now.
if (continueBlock) {
todoBlocks.erase(continueBlock);
visit(continueBlock);
}
if (mergeBlock) {
todoBlocks.erase(mergeBlock);
visit(mergeBlock);
}
}
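// Illustrative walk: for a loop header H with continue target C and merge
// block M, visiting H emits H first, defers C and M via todoBlocks, recurses
// into the loop body blocks, and only afterwards visits C and then M, which
// yields a human-readable structured order.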
} // end namespace spirv
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/GlPerVertex.h | //===--- GlPerVertex.h - For handling gl_PerVertex members -------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_SPIRV_GLPERVERTEX_H
#define LLVM_CLANG_LIB_SPIRV_GLPERVERTEX_H
#include "dxc/DXIL/DxilSemantic.h"
#include "dxc/DXIL/DxilSigPoint.h"
#include "clang/SPIRV/SpirvBuilder.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
namespace clang {
namespace spirv {
/// The class for handling ClipDistance and CullDistance builtin variables that
/// belong to gl_PerVertex.
///
/// Reading/writing of the ClipDistance/CullDistance builtin is not as
/// straightforward as other builtins. This is because in HLSL, we can have
/// multiple entities annotated with SV_ClipDistance/SV_CullDistance and they
/// can be float or vector of float type. For example,
///
/// float2 var1 : SV_ClipDistance2,
/// float var2 : SV_ClipDistance0,
/// float3 var3 : SV_ClipDistance1,
///
/// But in Vulkan, ClipDistance/CullDistance is required to be a float array.
/// So we need to combine these variables into one single float array. The way
/// we do it is by sorting all entities according to the SV_ClipDistance/
/// SV_CullDistance index, and concatenating them tightly. So for the above,
/// var2 will take the first float in the array, var3 will take the next
/// three, and var1 will take the last two. In total, we have a size-6 float
/// array for the ClipDistance builtin.
class GlPerVertex {
public:
GlPerVertex(ASTContext &context, SpirvContext &spvContext,
SpirvBuilder &spvBuilder);
/// Records a declaration of SV_ClipDistance/SV_CullDistance so later
  /// we can calculate the ClipDistance/CullDistance array layout.
/// Also records the semantic strings provided for them.
bool recordGlPerVertexDeclFacts(const DeclaratorDecl *decl, bool asInput);
/// Calculates the layout for ClipDistance/CullDistance arrays.
void calculateClipCullDistanceArraySize();
  /// Emits SPIR-V code for the input and/or output ClipDistance/CullDistance
/// builtin variables. If inputArrayLength is not zero, the input variable
/// will have an additional arrayness of the given size. Similarly for
/// outputArrayLength.
///
  /// Note that this method should be called after recordGlPerVertexDeclFacts()
  /// and calculateClipCullDistanceArraySize().
void generateVars(uint32_t inputArrayLength, uint32_t outputArrayLength);
/// Returns the stage input variables.
llvm::SmallVector<SpirvVariable *, 2> getStageInVars() const;
/// Returns the stage output variables.
llvm::SmallVector<SpirvVariable *, 2> getStageOutVars() const;
/// Tries to access the builtin translated from the given HLSL semantic of the
/// given index.
///
/// If sigPoint indicates this is input, builtins will be read to compose a
/// new temporary value of the correct type and writes to *value. Otherwise,
/// the *value will be decomposed and writes to the builtins, unless
/// noWriteBack is true, which means do not write back the value.
///
  /// If invocation (should only be used for HS and MS) is not llvm::None, only
  /// accesses the element at the invocation offset in the gl_PerVertex array.
///
/// Creates SPIR-V instructions and returns true if we are accessing builtins
/// that are ClipDistance or CullDistance. Does nothing and returns true if
  /// accessing builtins for others. Returns false if errors occur.
bool tryToAccess(hlsl::SigPoint::Kind sigPoint, hlsl::Semantic::Kind,
uint32_t semanticIndex,
llvm::Optional<SpirvInstruction *> invocation,
SpirvInstruction **value, bool noWriteBack,
SpirvInstruction *vecComponent, SourceLocation loc,
SourceRange range = {});
private:
using SemanticIndexToTypeMap = llvm::DenseMap<uint32_t, QualType>;
using SemanticIndexToArrayOffsetMap = llvm::DenseMap<uint32_t, uint32_t>;
template <unsigned N>
DiagnosticBuilder emitError(const char (&message)[N], SourceLocation loc) {
const auto diagId = astContext.getDiagnostics().getCustomDiagID(
clang::DiagnosticsEngine::Error, message);
return astContext.getDiagnostics().Report(loc, diagId);
}
/// Creates a stand-alone ClipDistance/CullDistance builtin variable.
SpirvVariable *createClipCullDistanceVar(bool asInput, bool isClip,
uint32_t arraySize, bool isPrecise);
/// Creates SPIR-V instructions for reading the data starting from offset in
/// the ClipDistance/CullDistance builtin. The data read will be transformed
/// into the given type asType.
SpirvInstruction *readClipCullArrayAsType(bool isClip, uint32_t offset,
QualType asType, SourceLocation loc,
SourceRange range = {}) const;
/// Creates SPIR-V instructions to read a field in gl_PerVertex.
bool readField(hlsl::Semantic::Kind semanticKind, uint32_t semanticIndex,
SpirvInstruction **value, SourceLocation loc,
SourceRange range = {});
/// Creates SPIR-V instructions for writing data into the ClipDistance/
/// CullDistance builtin starting from offset. The value to be written is
  /// fromValue, whose type is fromType. Necessary transformations will be
  /// generated to ensure type correctness.
void
writeClipCullArrayFromType(llvm::Optional<SpirvInstruction *> invocationId,
bool isClip, SpirvInstruction *offset,
QualType fromType, SpirvInstruction *fromValue,
SourceLocation loc, SourceRange range = {}) const;
/// Creates SPIR-V instructions to write a field in gl_PerVertex.
bool writeField(hlsl::Semantic::Kind semanticKind, uint32_t semanticIndex,
llvm::Optional<SpirvInstruction *> invocationId,
SpirvInstruction **value, SpirvInstruction *vecComponent,
SourceLocation loc, SourceRange range = {});
  /// Internal implementation for recordGlPerVertexDeclFacts().
bool doGlPerVertexFacts(const NamedDecl *decl, QualType type, bool asInput);
/// Returns whether the type is a scalar, vector, or array that contains
/// only scalars with float type.
bool containOnlyFloatType(QualType type) const;
/// Returns the number of all scalar components recursively included in a
/// scalar, vector, or array type. If type is not a scalar, vector, or array,
/// returns 0.
uint32_t getNumberOfScalarComponentsInScalarVectorArray(QualType type) const;
/// Creates load instruction for clip or cull distance with a scalar type.
SpirvInstruction *createScalarClipCullDistanceLoad(
SpirvInstruction *ptr, QualType asType, uint32_t offset,
SourceLocation loc,
llvm::Optional<uint32_t> arrayIndex = llvm::None) const;
/// Creates load instruction for clip or cull distance with a scalar or vector
/// type.
SpirvInstruction *createScalarOrVectorClipCullDistanceLoad(
SpirvInstruction *ptr, QualType asType, uint32_t offset,
SourceLocation loc,
llvm::Optional<uint32_t> arrayIndex = llvm::None) const;
  /// Creates load instruction for clip or cull distance with a scalar or
  /// vector type, or an array of them.
SpirvInstruction *createClipCullDistanceLoad(
SpirvInstruction *ptr, QualType asType, uint32_t offset,
SourceLocation loc,
llvm::Optional<uint32_t> arrayIndex = llvm::None) const;
/// Creates store instruction for clip or cull distance with a scalar type.
bool createScalarClipCullDistanceStore(
SpirvInstruction *ptr, SpirvInstruction *value, QualType valueType,
SpirvInstruction *offset, SourceLocation loc,
llvm::ArrayRef<uint32_t> valueIndices,
llvm::Optional<SpirvInstruction *> arrayIndex = llvm::None) const;
/// Creates store instruction for clip or cull distance with a scalar or
/// vector type.
bool createScalarOrVectorClipCullDistanceStore(
SpirvInstruction *ptr, SpirvInstruction *value, QualType valueType,
SpirvInstruction *offset, SourceLocation loc,
llvm::Optional<uint32_t> valueOffset,
llvm::Optional<SpirvInstruction *> arrayIndex = llvm::None) const;
  /// Creates store instruction for clip or cull distance with a scalar or
  /// vector type, or an array of them.
bool createClipCullDistanceStore(
SpirvInstruction *ptr, SpirvInstruction *value, QualType valueType,
SpirvInstruction *offset, SourceLocation loc,
llvm::Optional<SpirvInstruction *> arrayIndex = llvm::None) const;
/// Keeps the mapping semanticIndex to clipCullDistanceType in typeMap and
/// returns true if clipCullDistanceType is a valid type for clip/cull
/// distance. Otherwise, returns false.
bool setClipCullDistanceType(SemanticIndexToTypeMap *typeMap,
uint32_t semanticIndex,
QualType clipCullDistanceType) const;
private:
ASTContext &astContext;
SpirvContext &spvContext;
SpirvBuilder &spvBuilder;
/// Input/output ClipDistance/CullDistance variable.
SpirvVariable *inClipVar, *inCullVar;
SpirvVariable *outClipVar, *outCullVar;
// We need to record whether the variables with 'SV_ClipDistance' or
// 'SV_CullDistance' have the HLSL 'precise' keyword.
bool inClipPrecise, outClipPrecise;
bool inCullPrecise, outCullPrecise;
/// The array size for the input/output gl_PerVertex block member variables.
  /// HS input and output, DS input, and GS input have an additional level of
/// arrayness. The array size is stored in this variable. Zero means
/// the corresponding variable does not need extra arrayness.
uint32_t inArraySize, outArraySize;
/// The array size of input/output ClipDistance/CullDistance float arrays.
/// This is not the array size of the whole gl_PerVertex struct.
uint32_t inClipArraySize, outClipArraySize;
uint32_t inCullArraySize, outCullArraySize;
/// We need to record all SV_ClipDistance/SV_CullDistance decls' types
/// since we need to generate the necessary conversion instructions when
/// accessing the ClipDistance/CullDistance builtins.
SemanticIndexToTypeMap inClipType, outClipType;
SemanticIndexToTypeMap inCullType, outCullType;
/// We also need to keep track of all SV_ClipDistance/SV_CullDistance decls'
/// offsets in the float array.
SemanticIndexToArrayOffsetMap inClipOffset, outClipOffset;
SemanticIndexToArrayOffsetMap inCullOffset, outCullOffset;
enum { kSemanticStrCount = 2 };
/// Keeps track of the semantic strings provided in the source code for the
/// builtins in gl_PerVertex.
llvm::SmallVector<std::string, kSemanticStrCount> inSemanticStrs;
llvm::SmallVector<std::string, kSemanticStrCount> outSemanticStrs;
};
} // end namespace spirv
} // end namespace clang
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/AlignmentSizeCalculator.h | //===--- AlignmentSizeCalculator.h - Alignment And Size Calc -----*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_SPIRV_ALIGNMENTSIZECALCULATOR_H
#define LLVM_CLANG_LIB_SPIRV_ALIGNMENTSIZECALCULATOR_H
#include "dxc/Support/SPIRVOptions.h"
#include "clang/AST/ASTContext.h"
#include "clang/SPIRV/AstTypeProbe.h"
namespace clang {
namespace spirv {
/// The class responsible for computing the alignment and size of Clang
/// frontend types according to SPIR-V layout rules.
class AlignmentSizeCalculator {
public:
AlignmentSizeCalculator(ASTContext &astCtx, const SpirvCodeGenOptions &opts)
: astContext(astCtx), spvOptions(opts) {}
/// \brief Returns the alignment and size in bytes for the given type
/// according to the given LayoutRule. If the caller has information about
/// whether the type is a row-major matrix, that should also be passed in. If
  /// this information is not provided, the function tries to find any majorness
  /// attributes on the given type and uses them.
///
/// If the type is an array/matrix type, writes the array/matrix stride to
/// stride.
///
/// Note that the size returned is not exactly how many bytes the type
/// will occupy in memory; rather it is used in conjunction with alignment
/// to get the next available location (alignment + size), which means
/// size contains post-paddings required by the given type.
std::pair<uint32_t, uint32_t>
getAlignmentAndSize(QualType type, SpirvLayoutRule rule,
llvm::Optional<bool> isRowMajor, uint32_t *stride) const;
/// \brief Aligns currentOffset properly to allow packing vectors in the HLSL
/// way: using the element type's alignment as the vector alignment, as long
/// as there is no improper straddle.
/// fieldSize and fieldAlignment are the original size and alignment
/// calculated without considering the HLSL vector relaxed rule.
void alignUsingHLSLRelaxedLayout(QualType fieldType, uint32_t fieldSize,
uint32_t fieldAlignment,
uint32_t *currentOffset) const;
/// \brief Returns true if we use row-major matrix for type. Otherwise,
/// returns false.
bool useRowMajor(llvm::Optional<bool> isRowMajor,
clang::QualType type) const {
return isRowMajor.hasValue() ? isRowMajor.getValue()
: isRowMajorMatrix(spvOptions, type);
}
private:
/// Emits error to the diagnostic engine associated with this visitor.
template <unsigned N>
DiagnosticBuilder emitError(const char (&message)[N],
SourceLocation srcLoc = {}) const {
const auto diagId = astContext.getDiagnostics().getCustomDiagID(
clang::DiagnosticsEngine::Error, message);
return astContext.getDiagnostics().Report(srcLoc, diagId);
}
/// Emits warning to the diagnostic engine associated with this visitor.
template <unsigned N>
DiagnosticBuilder emitWarning(const char (&message)[N],
SourceLocation srcLoc = {}) const {
const auto diagId = astContext.getDiagnostics().getCustomDiagID(
clang::DiagnosticsEngine::Warning, message);
return astContext.getDiagnostics().Report(srcLoc, diagId);
}
// Returns the alignment and size in bytes for the given struct
// according to the given LayoutRule.
std::pair<uint32_t, uint32_t>
getAlignmentAndSize(QualType type, const RecordType *structType,
SpirvLayoutRule rule, llvm::Optional<bool> isRowMajor,
uint32_t *stride) const;
private:
ASTContext &astContext; /// AST context
const SpirvCodeGenOptions &spvOptions; /// SPIR-V options
};
} // end namespace spirv
} // end namespace clang
#endif // LLVM_CLANG_LIB_SPIRV_ALIGNMENTSIZECALCULATOR_H
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/DeclResultIdMapper.h | //===--- DeclResultIdMapper.h - AST Decl to SPIR-V <result-id> mapper ------==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_SPIRV_DECLRESULTIDMAPPER_H
#define LLVM_CLANG_LIB_SPIRV_DECLRESULTIDMAPPER_H
#include <tuple>
#include <vector>
#include "dxc/Support/SPIRVOptions.h"
#include "spirv/unified1/spirv.hpp11"
#include "clang/AST/Attr.h"
#include "clang/SPIRV/FeatureManager.h"
#include "clang/SPIRV/SpirvBuilder.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "GlPerVertex.h"
#include "StageVar.h"
namespace clang {
namespace spirv {
class SpirvEmitter;
class ResourceVar {
public:
ResourceVar(SpirvVariable *var, const Decl *decl, SourceLocation loc,
const hlsl::RegisterAssignment *r, const VKBindingAttr *b,
const VKCounterBindingAttr *cb, bool counter = false,
bool globalsBuffer = false)
: variable(var), declaration(decl), srcLoc(loc), reg(r), binding(b),
counterBinding(cb), isCounterVar(counter),
isGlobalsCBuffer(globalsBuffer) {}
SpirvVariable *getSpirvInstr() const { return variable; }
const Decl *getDeclaration() const { return declaration; }
SourceLocation getSourceLocation() const { return srcLoc; }
const hlsl::RegisterAssignment *getRegister() const { return reg; }
const VKBindingAttr *getBinding() const { return binding; }
bool isCounter() const { return isCounterVar; }
bool isGlobalsBuffer() const { return isGlobalsCBuffer; }
const VKCounterBindingAttr *getCounterBinding() const {
return counterBinding;
}
private:
SpirvVariable *variable; ///< The variable
const Decl *declaration; ///< The declaration
SourceLocation srcLoc; ///< Source location
const hlsl::RegisterAssignment *reg; ///< HLSL register assignment
const VKBindingAttr *binding; ///< Vulkan binding assignment
const VKCounterBindingAttr *counterBinding; ///< Vulkan counter binding
  bool isCounterVar;                          ///< Counter variable or not
bool isGlobalsCBuffer; ///< $Globals cbuffer or not
};
/// A (instruction-pointer, is-alias-or-not) pair for counter variables
class CounterIdAliasPair {
public:
/// Default constructor to satisfy llvm::DenseMap
CounterIdAliasPair() : counterVar(nullptr), isAlias(false) {}
CounterIdAliasPair(SpirvVariable *var, bool alias)
: counterVar(var), isAlias(alias) {}
/// Returns the pointer to the counter variable alias. This returns a pointer
/// that can be used as the address to a store instruction when storing to an
/// alias counter.
SpirvInstruction *getAliasAddress() const;
/// Returns the pointer to the counter variable. Dereferences first if this is
/// an alias to a counter variable.
SpirvInstruction *getCounterVariable(SpirvBuilder &builder,
SpirvContext &spvContext) const;
  /// Stores the counter variable pointed to by src to the current counter
/// variable. The current counter variable must be an alias.
inline void assign(SpirvInstruction *src, SpirvBuilder &) const;
private:
SpirvVariable *counterVar;
/// Note: legalization specific code
bool isAlias;
};
/// A class for holding all the counter variables associated with a struct's
/// fields
///
/// An alias local RW/Append/Consume structured buffer will need an associated
/// counter variable generated. There are four forms such an alias buffer can
/// take:
///
/// 1 (AssocCounter#1). A stand-alone variable,
/// 2 (AssocCounter#2). A struct field,
/// 3 (AssocCounter#3). A struct containing alias fields,
/// 4 (AssocCounter#4). A nested struct containing alias fields.
///
/// We consider the first two cases as *final* alias entities; the last two
/// cases are called *intermediate* alias entities, since we can still
/// decompose them and get final alias entities.
///
/// We need to create an associated counter variable no matter which form the
/// alias buffer is in, which means we need to recursively visit all fields of
/// a struct to discover the counters when it is not AssocCounter#1. That
/// yields a hierarchy.
///
/// The purpose of this class is to provide such hierarchy in a *flattened* way.
/// Each field's associated counter is represented with an index vector and the
/// counter's <result-id>. For example, for the following structs,
///
/// struct S {
/// RWStructuredBuffer s1;
/// AppendStructuredBuffer s2;
/// };
///
/// struct T {
/// S t1;
/// S t2;
/// };
///
/// An instance of T will have four associated counters for
/// field: indices, <result-id>
/// t1.s1: [0, 0], <id-1>
/// t1.s2: [0, 1], <id-2>
/// t2.s1: [1, 0], <id-3>
/// t2.s2: [1, 1], <id-4>
class CounterVarFields {
public:
CounterVarFields() = default;
/// Registers a field's associated counter.
void append(const llvm::SmallVector<uint32_t, 4> &indices,
SpirvVariable *counter) {
fields.emplace_back(indices, counter);
}
  /// Returns the counter associated with the field at the given indices if it
  /// has one. Returns nullptr otherwise.
const CounterIdAliasPair *
get(const llvm::SmallVectorImpl<uint32_t> &indices) const;
/// Assigns to all the fields' associated counter from the srcFields.
/// Returns true if there are no errors during the assignment.
///
  /// The first overload is for assigning a struct as a whole: we need to
  /// update all the associated counters in the target struct. The second
  /// overload is for assigning a potentially nested struct.
bool assign(const CounterVarFields &srcFields, SpirvBuilder &,
SpirvContext &) const;
bool assign(const CounterVarFields &srcFields,
const llvm::SmallVector<uint32_t, 4> &dstPrefix,
const llvm::SmallVector<uint32_t, 4> &srcPrefix, SpirvBuilder &,
SpirvContext &) const;
private:
struct IndexCounterPair {
IndexCounterPair(const llvm::SmallVector<uint32_t, 4> &idx,
SpirvVariable *counter)
: indices(idx), counterVar(counter, true) {}
llvm::SmallVector<uint32_t, 4> indices; ///< Index vector
CounterIdAliasPair counterVar; ///< Counter variable information
};
llvm::SmallVector<IndexCounterPair, 4> fields;
};
/// \brief The class containing mappings from Clang frontend Decls to their
/// corresponding SPIR-V <result-id>s.
///
/// All symbols defined in the AST should be "defined" or registered in this
/// class and have their <result-id>s queried from this class. In the process
/// of defining a Decl, the SPIR-V module builder passed into the constructor
/// will be used to generate all SPIR-V instructions required.
///
/// This class acts as a middle layer to handle the mapping between HLSL
/// semantics and Vulkan stage (builtin/input/output) variables. Such mapping
/// is required because of the semantic differences between DirectX and
/// Vulkan and the essence of HLSL as the front-end language for DirectX.
/// A normal variable attached with some semantic will be translated into a
/// single stage variable if it is of non-struct type. If it is of struct
/// type, the fields with attached semantics will need to be translated into
/// stage variables per Vulkan's requirements.
class DeclResultIdMapper {
/// \brief An internal class to handle binding number allocation.
class BindingSet;
public:
inline DeclResultIdMapper(ASTContext &context, SpirvContext &spirvContext,
SpirvBuilder &spirvBuilder, SpirvEmitter &emitter,
FeatureManager &features,
const SpirvCodeGenOptions &spirvOptions);
/// \brief Returns the SPIR-V builtin variable. Uses sc as default storage
/// class.
SpirvVariable *getBuiltinVar(spv::BuiltIn builtIn, QualType type,
spv::StorageClass sc, SourceLocation);
/// \brief Returns the SPIR-V builtin variable. Tries to infer storage class
/// from the builtin.
SpirvVariable *getBuiltinVar(spv::BuiltIn builtIn, QualType type,
SourceLocation);
/// \brief If var is a raytracing stage variable, returns its entry point,
/// otherwise returns nullptr.
SpirvFunction *getRayTracingStageVarEntryFunction(SpirvVariable *var);
/// \brief Creates the stage output variables by parsing the semantics
/// attached to the given function's parameter or return value and returns
/// true on success. SPIR-V instructions will also be generated to update the
/// contents of the output variables by extracting sub-values from the given
/// storedValue. forPCF should be set to true for handling decls in patch
/// constant function.
///
/// Note that the control point stage output variable of HS should be created
/// by the other overload.
bool createStageOutputVar(const DeclaratorDecl *decl,
SpirvInstruction *storedValue, bool forPCF);
  /// \brief Overload for handling HS control point stage output variable.
bool createStageOutputVar(const DeclaratorDecl *decl, uint32_t arraySize,
SpirvInstruction *invocationId,
SpirvInstruction *storedValue);
/// \brief Creates the stage input variables by parsing the semantics attached
/// to the given function's parameter and returns true on success. SPIR-V
/// instructions will also be generated to load the contents from the input
  /// variables, compose them into one value, and write it to *loadedValue.
  /// forPCF
/// should be set to true for handling decls in patch constant function.
bool createStageInputVar(const ParmVarDecl *paramDecl,
SpirvInstruction **loadedValue, bool forPCF);
/// \brief Creates stage variables for raytracing.
SpirvVariable *createRayTracingNVStageVar(spv::StorageClass sc,
const VarDecl *decl);
SpirvVariable *createRayTracingNVStageVar(spv::StorageClass sc, QualType type,
std::string name, bool isPrecise,
bool isNointerp);
/// \brief Creates the taskNV stage variables for payload struct variable
/// and returns true on success. SPIR-V instructions will also be generated
/// to load/store the contents from/to *value. payloadMemOffset is incremented
/// based on payload struct member size, alignment and offset, and SPIR-V
/// decorations PerTaskNV and Offset are assigned to each member.
bool createPayloadStageVars(const hlsl::SigPoint *sigPoint,
spv::StorageClass sc, const NamedDecl *decl,
bool asInput, QualType type,
const llvm::StringRef namePrefix,
SpirvInstruction **value,
uint32_t payloadMemOffset = 0);
  /// \brief Creates a function-scope parameter in the current function and
/// returns its instruction. dbgArgNumber is used to specify the argument
/// number of param among function parameters, which will be used for the
/// debug information. Note that dbgArgNumber for the first function
/// parameter must have "1", not "0", which is what Clang generates for
/// LLVM debug metadata.
SpirvFunctionParameter *createFnParam(const ParmVarDecl *param,
uint32_t dbgArgNumber = 0);
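  // Illustrative example (hypothetical signature): for foo(float4 a,
  // float3 b), 'a' should be created with dbgArgNumber 1 and 'b' with 2,
  // matching Clang's 1-based numbering for LLVM debug metadata.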
/// \brief Creates the counter variable associated with the given param.
  /// This is meant to be used for forward-declared functions and the 'this'
  /// objects of methods.
///
/// Note: legalization specific code
inline void createFnParamCounterVar(const VarDecl *param);
/// \brief Creates a function-scope variable in the current function and
/// returns its instruction.
SpirvVariable *createFnVar(const VarDecl *var,
llvm::Optional<SpirvInstruction *> init);
/// \brief Creates a file-scope variable and returns its instruction.
SpirvVariable *createFileVar(const VarDecl *var,
llvm::Optional<SpirvInstruction *> init);
/// Creates a global variable for resource heaps containing elements of type
/// |type|.
SpirvVariable *createResourceHeap(const VarDecl *var, QualType type);
/// \brief Creates an external-visible variable and returns its instruction.
SpirvVariable *createExternVar(const VarDecl *var);
/// \brief Creates an external-visible variable of type |type| and returns its
/// instruction.
SpirvVariable *createExternVar(const VarDecl *var, QualType type);
/// \brief Returns an OpString instruction that represents the given VarDecl.
/// VarDecl must be a variable of string type.
///
  /// This function inspects the VarDecl for an initialization expression. If
  /// an initialization expression is not found, it will emit an error because
  /// the variable cannot be deduced to an OpString literal, and string
  /// variables do not exist in SPIR-V.
///
/// Note: HLSL has the 'string' type which can be used for rare purposes such
/// as printf (SPIR-V's DebugPrintf). SPIR-V does not have a 'char' or
/// 'string' type, and therefore any variable of such type is never created.
/// The string literal is evaluated when needed and an OpString is generated
/// for it.
SpirvInstruction *createOrUpdateStringVar(const VarDecl *);
/// \brief Returns an instruction that represents the given VarDecl.
/// VarDecl must be a variable of vk::ext_result_id<Type> type.
///
/// This function inspects the VarDecl for an initialization expression. If
  /// an initialization expression is not found, it will emit an error because
  /// the variable with a result id requires an initialization.
SpirvInstruction *createResultId(const VarDecl *var);
/// \brief Creates an Enum constant.
void createEnumConstant(const EnumConstantDecl *decl);
/// \brief Creates a cbuffer/tbuffer from the given decl.
///
/// In the AST, cbuffer/tbuffer is represented as a HLSLBufferDecl, which is
/// a DeclContext, and all fields in the buffer are represented as VarDecls.
/// We cannot do the normal translation path, which will translate a field
/// into a standalone variable. We need to create a single SPIR-V variable
/// for the whole buffer. When we refer to the field VarDecl later, we need
/// to do an extra OpAccessChain to get its pointer from the SPIR-V variable
/// standing for the whole buffer.
SpirvVariable *createCTBuffer(const HLSLBufferDecl *decl);
/// \brief Creates a PushConstant block from the given decl.
SpirvVariable *createPushConstant(const VarDecl *decl);
/// \brief Creates the $Globals cbuffer.
void createGlobalsCBuffer(const VarDecl *var);
/// \brief Returns the suitable type for the given decl, considering the
  /// given decl could possibly be created as an alias variable. If so, a
  /// pointer-to-the-value type will be returned; otherwise, just the normal
  /// value type. For an alias variable having an associated counter, the
/// counter variable will also be emitted.
///
/// If the type is for an alias variable, writes true to *shouldBeAlias and
/// writes storage class, layout rule, and valTypeId to *info.
///
/// Note: legalization specific code
QualType
getTypeAndCreateCounterForPotentialAliasVar(const DeclaratorDecl *var,
bool *shouldBeAlias = nullptr);
/// \brief Sets the entry function.
void setEntryFunction(SpirvFunction *fn) { entryFunction = fn; }
/// \brief If the given decl is an implicit VarDecl that evaluates to a
/// constant, it evaluates the constant and registers the resulting SPIR-V
/// instruction in the astDecls map. Otherwise returns without doing anything.
///
/// Note: There are many cases where the front-end might create such implicit
/// VarDecls (such as some ray tracing enums).
void tryToCreateImplicitConstVar(const ValueDecl *);
/// \brief Creates instructions to copy output stage variables defined by
  /// outputPatchDecl to hullMainOutputPatch, which is a variable used to pass
  /// the OutputPatch argument. outputControlPointType is the template
/// parameter type of OutputPatch and numOutputControlPoints is the number of
/// output control points.
void copyHullOutStageVarsToOutputPatch(SpirvInstruction *hullMainOutputPatch,
const ParmVarDecl *outputPatchDecl,
QualType outputControlPointType,
uint32_t numOutputControlPoints);
/// \brief An enum class for representing what the DeclContext is used for
enum class ContextUsageKind {
CBuffer,
TBuffer,
PushConstant,
Globals,
ShaderRecordBufferNV,
ShaderRecordBufferKHR
};
/// Raytracing specific functions
/// \brief Creates a ShaderRecordBufferEXT or ShaderRecordBufferNV block from
/// the given decl.
SpirvVariable *createShaderRecordBuffer(const VarDecl *decl,
ContextUsageKind kind);
SpirvVariable *createShaderRecordBuffer(const HLSLBufferDecl *decl,
ContextUsageKind kind);
// Records the TypedefDecl or TypeAliasDecl of vk::SpirvType so that any
// required capabilities and extensions can be added if the type is used.
void recordsSpirvTypeAlias(const Decl *decl);
private:
  /// The struct containing SPIR-V information of an AST Decl.
struct DeclSpirvInfo {
/// Default constructor to satisfy DenseMap
DeclSpirvInfo() : instr(nullptr), indexInCTBuffer(-1) {}
DeclSpirvInfo(SpirvInstruction *instr_, int index = -1)
: instr(instr_), indexInCTBuffer(index) {}
/// Implicit conversion to SpirvInstruction*.
operator SpirvInstruction *() const { return instr; }
SpirvInstruction *instr;
/// Value >= 0 means that this decl is a VarDecl inside a cbuffer/tbuffer
/// and this is the index; value < 0 means this is just a standalone decl.
int indexInCTBuffer;
};
/// The struct containing the data needed to create the input and output
/// variables for the decl.
struct StageVarDataBundle {
// The declaration of the variable for which we need to create the stage
// variables.
const NamedDecl *decl;
// The HLSL semantic to apply to the variable. Note that this could be
// different than the semantic attached to decl because it could inherit
// the semantic from the parent declaration if this declaration is a member.
SemanticInfo *semantic;
    // True if the variable is not supposed to be interpolated. Note that we
// cannot just look at decl to determine this because the attribute might
// have been applied to a parent declaration.
bool asNoInterp;
// The sigPoint is the shader stage that this variable should be added to,
// and whether it is an input or output.
const hlsl::SigPoint *sigPoint;
// The type to use for the new variable. There are cases where the type
// might be different. See the call sites for createStageVars.
QualType type;
    // If the shader stage for the variable is HS, DS, or GS, SPIR-V requires
    // that the stage variable be an array of the type. The arraySize gives
    // the size of that array.
uint32_t arraySize;
// A prefix to use for the name of the variable.
llvm::StringRef namePrefix;
    // If arraySize is not zero, invocationId gives the index to use when
// generating a write to the stage variable.
llvm::Optional<SpirvInstruction *> invocationId;
};
/// \brief Returns the SPIR-V information for the given decl.
/// Returns nullptr if no such decl was previously registered.
const DeclSpirvInfo *getDeclSpirvInfo(const ValueDecl *decl) const;
/// \brief Creates DeclSpirvInfo using the given instr and index. It creates a
  /// clone variable if the instruction is a CTBuffer containing a 1xN matrix
  /// with FXC memory layout.
DeclSpirvInfo createDeclSpirvInfo(SpirvInstruction *instr,
int index = -1) const {
if (auto *clone = spvBuilder.initializeCloneVarForFxcCTBuffer(instr))
instr = clone;
return DeclSpirvInfo(instr, index);
}
public:
/// \brief Returns the information for the given decl.
///
/// This method will panic if the given decl is not registered.
SpirvInstruction *getDeclEvalInfo(const ValueDecl *decl, SourceLocation loc,
SourceRange range = {});
/// \brief Returns the instruction pointer for the given function if already
/// registered; otherwise, treats the given function as a normal decl and
/// returns a newly created instruction for it.
SpirvFunction *getOrRegisterFn(const FunctionDecl *fn);
/// Registers that the given decl should be translated into the given spec
/// constant.
void registerSpecConstant(const VarDecl *decl,
SpirvInstruction *specConstant);
/// \brief Returns the associated counter's (instr-ptr, is-alias-or-not)
/// pair for the given {RW|Append|Consume}StructuredBuffer variable.
  /// If indices is not nullptr, walks through the fields of the decl, expected
/// to be of struct type, using the indices to find the field. Returns nullptr
/// if the given decl has no associated counter variable created.
const CounterIdAliasPair *getCounterIdAliasPair(
const DeclaratorDecl *decl,
const llvm::SmallVector<uint32_t, 4> *indices = nullptr);
/// \brief Returns the associated counter's (instr-ptr, is-alias-or-not)
/// pair for the given {RW|Append|Consume}StructuredBuffer variable. Creates
/// counter for RW buffer if not already created.
const CounterIdAliasPair *
createOrGetCounterIdAliasPair(const DeclaratorDecl *decl);
/// \brief Returns all the associated counters for the given decl. The decl is
/// expected to be a struct containing alias RW/Append/Consume structured
/// buffers. Returns nullptr if it does not.
const CounterVarFields *getCounterVarFields(const DeclaratorDecl *decl);
  /// \brief Returns all defined stage (builtin/input/output) variables for the
/// entry point function entryPoint in this mapper.
std::vector<SpirvVariable *>
collectStageVars(SpirvFunction *entryPoint) const;
/// \brief Writes out the contents in the function parameter for the GS
/// stream output to the corresponding stage output variables in a recursive
/// manner. Returns true on success, false if errors occur.
///
/// decl is the Decl with semantic string attached and will be used to find
/// the stage output variable to write to, value is the SPIR-V variable to
/// read data from.
///
/// This method is specially for writing back per-vertex data at the time of
/// OpEmitVertex in GS.
bool writeBackOutputStream(const NamedDecl *decl, QualType type,
SpirvInstruction *value, SourceRange range = {});
/// \brief Reciprocates to get the multiplicative inverse of SV_Position.w
/// if requested.
SpirvInstruction *invertWIfRequested(SpirvInstruction *position,
SourceLocation loc);
/// \brief Decorates all stage input and output variables with proper
/// location and returns true on success.
///
/// This method will write the location assignment into the module under
/// construction.
inline bool decorateStageIOLocations();
/// \brief Decorates all resource variables with proper set and binding
/// numbers and returns true on success.
///
/// This method will write the set and binding number assignment into the
/// module under construction.
bool decorateResourceBindings();
/// \brief Decorates resource variables with Coherent decoration if they
/// are declared as globallycoherent.
bool decorateResourceCoherent();
/// \brief Returns whether the SPIR-V module requires SPIR-V legalization
/// passes run to make it legal.
bool requiresLegalization() const { return needsLegalization; }
/// \brief Returns whether the SPIR-V module requires an optimization pass to
/// flatten array/structure of resources.
bool requiresFlatteningCompositeResources() const {
return needsFlatteningCompositeResources;
}
/// \brief Returns the given decl's HLSL semantic information.
static SemanticInfo getStageVarSemantic(const NamedDecl *decl);
/// \brief Returns SPIR-V instruction for given stage var decl.
SpirvInstruction *getStageVarInstruction(const DeclaratorDecl *decl) {
auto *value = stageVarInstructions.lookup(decl);
assert(value);
return value;
}
  /// Decorates with SPIR-V intrinsic attributes, taking a lambda that allows
  /// extra handling for each VKDecorateExtAttr.
void decorateWithIntrinsicAttrs(
const NamedDecl *decl, SpirvVariable *varInst,
llvm::function_ref<void(VKDecorateExtAttr *)> extraFunctionForDecoAttr =
[](VKDecorateExtAttr *) {});
/// \brief Creates instructions to load the value of output stage variable
/// defined by outputPatchDecl and store it to ptr. Since the output stage
/// variable for OutputPatch is an array whose number of elements is the
/// number of output control points, we need ctrlPointID to indicate which
  /// output control point is the copy target. outputControlPointType is the
/// template parameter type of OutputPatch.
void storeOutStageVarsToStorage(const DeclaratorDecl *outputPatchDecl,
SpirvConstant *ctrlPointID,
QualType outputControlPointType,
SpirvInstruction *ptr);
spv::ExecutionMode getInterlockExecutionMode();
  /// Records any SPIR-V capabilities and extensions for the given type so
  /// they will be added to the SPIR-V module. The capabilities and extensions
  /// required for the type will be sourced from the decls that were recorded
  /// using `recordsSpirvTypeAlias`.
void registerCapabilitiesAndExtensionsForType(const TypedefType *type);
private:
/// \brief Wrapper method to create a fatal error message and report it
/// in the diagnostic engine associated with this consumer.
template <unsigned N>
DiagnosticBuilder emitFatalError(const char (&message)[N],
SourceLocation loc) {
const auto diagId =
diags.getCustomDiagID(clang::DiagnosticsEngine::Fatal, message);
return diags.Report(loc, diagId);
}
/// \brief Wrapper method to create an error message and report it
/// in the diagnostic engine associated with this consumer.
template <unsigned N>
DiagnosticBuilder emitError(const char (&message)[N], SourceLocation loc) {
const auto diagId =
diags.getCustomDiagID(clang::DiagnosticsEngine::Error, message);
return diags.Report(loc, diagId);
}
/// \brief Wrapper method to create a warning message and report it
/// in the diagnostic engine associated with this consumer.
template <unsigned N>
DiagnosticBuilder emitWarning(const char (&message)[N], SourceLocation loc) {
const auto diagId =
diags.getCustomDiagID(clang::DiagnosticsEngine::Warning, message);
return diags.Report(loc, diagId);
}
/// \brief Wrapper method to create a note message and report it
/// in the diagnostic engine associated with this consumer.
template <unsigned N>
DiagnosticBuilder emitNote(const char (&message)[N], SourceLocation loc) {
const auto diagId =
diags.getCustomDiagID(clang::DiagnosticsEngine::Note, message);
return diags.Report(loc, diagId);
}
  /// \brief Checks whether any semantic is used more than once. Returns true
  /// if there is no duplication, false otherwise.
bool checkSemanticDuplication(bool forInput);
  /// \brief Checks whether any location/index pair is used more than once.
  /// Returns true if there is no duplication, false otherwise.
bool isDuplicatedStageVarLocation(
llvm::DenseSet<StageVariableLocationInfo, StageVariableLocationInfo>
*stageVariableLocationInfo,
const StageVar &var, uint32_t location, uint32_t index);
/// \brief Decorates vars with locations assigned by nextLocs.
/// stageVariableLocationInfo will be used to check the duplication of stage
/// variable locations.
bool assignLocations(
const std::vector<const StageVar *> &vars,
llvm::function_ref<uint32_t(uint32_t)> nextLocs,
llvm::DenseSet<StageVariableLocationInfo, StageVariableLocationInfo>
*stageVariableLocationInfo);
  /// \brief Decorates used Resource/Sampler descriptor heaps with the correct
  /// binding/set decorations.
void decorateResourceHeapsBindings(BindingSet &bindingSet);
/// \brief Returns a map that divides all of the shader stage variables into
/// separate vectors for each entry point.
llvm::DenseMap<const SpirvFunction *, SmallVector<StageVar, 8>>
getStageVarsPerFunction();
/// \brief Decorates all stage variables in `functionStageVars` with proper
/// location and returns true on success.
///
/// It is assumed that all variables in `functionStageVars` belong to the same
/// entry point.
///
/// This method will write the location assignment into the module under
/// construction.
bool finalizeStageIOLocationsForASingleEntryPoint(
bool forInput, ArrayRef<StageVar> functionStageVars);
/// \brief Decorates all stage input (if forInput is true) or output (if
/// forInput is false) variables with proper location and returns true on
/// success.
///
/// This method will write the location assignment into the module under
/// construction.
bool finalizeStageIOLocations(bool forInput);
/// Creates a variable of struct type with explicit layout decorations.
/// The sub-Decls in the given DeclContext will be treated as the struct
/// fields. The struct type will be named as typeName, and the variable
/// will be named as varName.
///
  /// This method should only be used for cbuffers/ConstantBuffers, tbuffers/
/// TextureBuffers, and PushConstants. usageKind must be set properly
/// depending on the usage kind.
///
  /// If arraySize is 0, the variable will be created as a struct; if arraySize
/// is > 0, the variable will be created as an array; if arraySize is -1, the
/// variable will be created as a runtime array.
///
  /// Panics if the DeclContext is neither an HLSLBufferDecl nor a RecordDecl.
SpirvVariable *createStructOrStructArrayVarOfExplicitLayout(
const DeclContext *decl, llvm::ArrayRef<int> arraySize,
ContextUsageKind usageKind, llvm::StringRef typeName,
llvm::StringRef varName);
/// Creates a variable of struct type with explicit layout decorations.
/// The sub-Decls in the given DeclContext will be treated as the struct
/// fields. The struct type will be named as typeName, and the variable
/// will be named as varName.
///
  /// This method should only be used for cbuffers/ConstantBuffers, tbuffers/
/// TextureBuffers, and PushConstants. usageKind must be set properly
/// depending on the usage kind.
///
  /// If arraySize is 0, the variable will be created as a struct; if arraySize
/// is > 0, the variable will be created as an array; if arraySize is -1, the
/// variable will be created as a runtime array.
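  ///
  /// For example (an illustrative sketch; S is a placeholder struct): a lone
  /// cbuffer corresponds to arraySize == 0, a ConstantBuffer<S> gArr[8]
  /// corresponds to arraySize == 8, and an unbounded ConstantBuffer<S> gArr[]
  /// corresponds to arraySize == -1 (a runtime array).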
///
  /// Panics if the DeclContext is neither an HLSLBufferDecl nor a RecordDecl.
SpirvVariable *createStructOrStructArrayVarOfExplicitLayout(
const DeclContext *decl, int arraySize, ContextUsageKind usageKind,
llvm::StringRef typeName, llvm::StringRef varName);
/// Creates all of the stage variables that must be generated for the given
/// stage variable data. Returns true on success.
///
/// stageVarData: See the definition of StageVarDataBundle to see how that
/// data is used.
///
/// asInput: True if the stage variable is an input.
///
  /// TODO(s-perron): a variable that is an input or an output depending on the
  /// value of a flag is very hard to read. This function should be split up
  /// and the flag variables removed.
///
/// [in/out] value: If `asInput` is true, this is an
/// output, and will be an instruction that loads the stage variable. If
/// `asInput` is false, then it is an input to createStageVars, and contains
/// the value to be stored in the new stage variable.
///
/// noWriteBack: If true, the newly created stage variable will not be written
/// to.
bool createStageVars(StageVarDataBundle &stageVarData, bool asInput,
SpirvInstruction **value, bool noWriteBack);
// Creates a variable to represent the output variable, which must be a
// structure. If `noWriteBack` is false, then `value` will be written to the
// new variable. Returns true if successful.
//
// stageVarData: The data needed to create the stage variable.
//
// noWriteBack: A flag to indicate if the variable should be written or not.
//
  // value: The value to be written to the newly created variable.
bool createStructOutputVar(const StageVarDataBundle &stageVarData,
SpirvInstruction *value, bool noWriteBack);
// Creates a variable to represent the input variable, which must be a
  // structure. The value is loaded, and the instruction with the final value
  // is returned.
//
// stageVarData: The data needed to create the stage variable.
//
// noWriteBack: A flag to indicate if the variable should be written or not.
SpirvInstruction *createStructInputVar(const StageVarDataBundle &stageVarData,
bool noWriteBack);
// Store `value` to the shader output variable `varInstr`. Since the type
// could be different, stageVarData is used to know how to convert `value`
// into the correct type for `varInstr`.
//
// varInstr: the output variable that corresponds to `stageVarData`. It must
// not be a struct.
//
  // value: The value to be written to the output variable.
//
// stageVarData: The data that was used to create `varInstr`.
void storeToShaderOutputVariable(SpirvVariable *varInstr,
SpirvInstruction *value,
const StageVarDataBundle &stageVarData);
// Loads shader input variable `varInstr`, and modifies the value to match the
// type in stageVarData. The struct stageVarData is used to know how to
// convert the value loaded from `varInstr` into the correct type.
//
// varInstr: the input variable that corresponds to `stageVarData`. It must
// not be a struct.
//
// stageVarData: The data that was used to create `varInstr`.
SpirvInstruction *
loadShaderInputVariable(SpirvVariable *varInstr,
const StageVarDataBundle &stageVarData);
// Creates a function scope variable to represent the "SV_InstanceID"
  // semantic, which is not immediately available in SPIR-V. Its value will be
// set by subtracting the values of the given InstanceIndex and base instance
// variables.
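  //
  // In effect: SV_InstanceID = InstanceIndex - BaseInstance.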
//
// instanceIndexVar: The SPIR-V input variable that decorated with
// InstanceIndex.
//
// baseInstanceVar: The SPIR-V input variable that is decorated with
// BaseInstance.
SpirvVariable *getInstanceIdFromIndexAndBase(SpirvVariable *instanceIndexVar,
SpirvVariable *baseInstanceVar);
// Creates a function scope variable to represent the "SV_VertexID"
// semantic, which is not immediately available in SPIR-V. Its value will be
  // set by subtracting the values of the given VertexIndex and base vertex
  // variables.
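  //
  // In effect: SV_VertexID = VertexIndex - BaseVertex.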
//
  // vertexIndexVar: The SPIR-V input variable decorated with
  // VertexIndex.
//
// baseVertexVar: The SPIR-V input variable decorated with
// BaseVertex.
SpirvVariable *getVertexIdFromIndexAndBase(SpirvVariable *vertexIndexVar,
SpirvVariable *baseVertexVar);
// Creates and returns a variable that is the BaseInstance builtin input. The
  // variable is also added to the list of stage variables `this->stageVars`.
  // Its
// type will be a 32-bit integer.
//
// sigPoint: the signature point identifying which shader stage the variable
// will be used in.
//
// type: The type to use for the new variable. Must be int or unsigned int.
SpirvVariable *getBaseInstanceVariable(const hlsl::SigPoint *sigPoint,
QualType type);
// Creates and returns a variable that is the BaseVertex builtin input. The
  // variable is also added to the list of stage variables `this->stageVars`.
  // Its
// type will be a 32-bit integer.
//
// sigPoint: the signature point identifying which shader stage the variable
// will be used in.
//
// type: The type to use for the new variable. Must be int or unsigned int.
SpirvVariable *getBaseVertexVariable(const hlsl::SigPoint *sigPoint,
QualType type);
  // Creates and returns a new interface variable from the information
  // provided. The new variable will be added to `this->stageVars`.
  //
// stageVarData: the data needed to create the interface variable. See the
// declaration of StageVarDataBundle for the details.
SpirvVariable *
createSpirvInterfaceVariable(const StageVarDataBundle &stageVarData);
// Returns the type that the SPIR-V input or output variable must have to
// correspond to a variable with the given information.
//
// stageVarData: the data needed to create the interface variable. See the
// declaration of StageVarDataBundle for the details.
QualType getTypeForSpirvStageVariable(const StageVarDataBundle &stageVarData);
// Returns true if all of the stage variable data is consistent with a valid
// shader stage variable. Issues an error and returns false otherwise.
bool validateShaderStageVar(const StageVarDataBundle &stageVarData);
  /// Returns true if all vk:: attribute usages are valid.
bool validateVKAttributes(const NamedDecl *decl);
/// Returns true if all vk::builtin usages are valid.
bool validateVKBuiltins(const StageVarDataBundle &stageVarData);
// Returns true if the type in stageVarData is compatible with the rest of the
// data. Issues an error and returns false otherwise.
bool validateShaderStageVarType(const StageVarDataBundle &stageVarData);
  // Returns true if the semantic is consistent with the rest of the given data.
bool isValidSemanticInShaderModel(const StageVarDataBundle &stageVarData);
/// Creates the SPIR-V variable instruction for the given StageVar and returns
/// the instruction. Also sets whether the StageVar is a SPIR-V builtin and
/// its storage class accordingly. name will be used as the debug name when
/// creating a stage input/output variable.
SpirvVariable *createSpirvStageVar(StageVar *, const NamedDecl *decl,
const llvm::StringRef name,
SourceLocation);
/// Methods for creating counter variables associated with the given decl.
  /// Creates associated counter variables for all AssocCounter cases (see the
/// comment of CounterVarFields).
void createCounterVarForDecl(const DeclaratorDecl *decl);
/// Creates the associated counter variable for final RW/Append/Consume
/// structured buffer. Handles AssocCounter#1 and AssocCounter#2 (see the
/// comment of CounterVarFields).
///
  /// declInstr is the SPIR-V instruction for the given decl. It should be
  /// non-null for non-alias buffers.
///
/// The counter variable will be created as an alias variable (of
/// pointer-to-pointer type in Private storage class) if isAlias is true.
///
/// Note: isAlias - legalization specific code
void
createCounterVar(const DeclaratorDecl *decl, SpirvInstruction *declInstr,
bool isAlias,
const llvm::SmallVector<uint32_t, 4> *indices = nullptr);
  /// Creates all associated counter variables by recursively visiting decl's
/// fields. Handles AssocCounter#3 and AssocCounter#4 (see the comment of
/// CounterVarFields).
inline void createFieldCounterVars(const DeclaratorDecl *decl);
void createFieldCounterVars(const DeclaratorDecl *rootDecl,
const DeclaratorDecl *decl,
llvm::SmallVector<uint32_t, 4> *indices);
/// Decorates varInstr of the given asType with proper interpolation modes
/// considering the attributes on the given decl.
void decorateInterpolationMode(const NamedDecl *decl, QualType asType,
SpirvVariable *varInstr,
const SemanticInfo semanticInfo);
/// Returns the proper SPIR-V storage class (Input or Output) for the given
/// SigPoint.
spv::StorageClass getStorageClassForSigPoint(const hlsl::SigPoint *);
/// Returns true if the given SPIR-V stage variable has Input storage class.
inline bool isInputStorageClass(const StageVar &v);
/// Creates DebugGlobalVariable and returns it if rich debug information
/// generation is enabled. Otherwise, returns nullptr.
SpirvDebugGlobalVariable *createDebugGlobalVariable(SpirvVariable *var,
const QualType &type,
const SourceLocation &loc,
const StringRef &name);
/// Determines the register type for a resource that does not have an
/// explicit register() declaration. Returns true if it is able to
/// determine the register type and will set |*registerTypeOut| to
/// 'u', 's', 'b', or 't'. Assumes |registerTypeOut| to be non-nullptr.
///
/// Uses the following mapping of HLSL types to register spaces:
/// t - for shader resource views (SRV)
/// TEXTURE1D
/// TEXTURE1DARRAY
/// TEXTURE2D
/// TEXTURE2DARRAY
/// TEXTURE3D
/// TEXTURECUBE
/// TEXTURECUBEARRAY
/// TEXTURE2DMS
/// TEXTURE2DMSARRAY
/// STRUCTUREDBUFFER
/// BYTEADDRESSBUFFER
/// BUFFER
/// TBUFFER
///
/// s - for samplers
/// SAMPLER
/// SAMPLER1D
/// SAMPLER2D
/// SAMPLER3D
/// SAMPLERCUBE
/// SAMPLERSTATE
/// SAMPLERCOMPARISONSTATE
///
/// u - for unordered access views (UAV)
/// RWBYTEADDRESSBUFFER
/// RWSTRUCTUREDBUFFER
/// APPENDSTRUCTUREDBUFFER
/// CONSUMESTRUCTUREDBUFFER
/// RWBUFFER
/// RWTEXTURE1D
/// RWTEXTURE1DARRAY
/// RWTEXTURE2D
/// RWTEXTURE2DARRAY
/// RWTEXTURE3D
///
/// b - for constant buffer views (CBV)
/// CBUFFER
/// CONSTANTBUFFER
bool getImplicitRegisterType(const ResourceVar &var,
char *registerTypeOut) const;
/// \brief Decorates stage variable with spirv intrinsic attributes. If
/// it is BuiltIn or Location decoration, sets locOrBuiltinDecorateAttr
/// of stageVar as true.
void decorateStageVarWithIntrinsicAttrs(const NamedDecl *decl,
StageVar *stageVar,
SpirvVariable *varInst);
/// \brief Records which execution mode should be used for rasterizer order
/// views.
void setInterlockExecutionMode(spv::ExecutionMode mode);
private:
SpirvBuilder &spvBuilder;
SpirvEmitter &theEmitter;
FeatureManager &featureManager;
const SpirvCodeGenOptions &spirvOptions;
ASTContext &astContext;
SpirvContext &spvContext;
DiagnosticsEngine &diags;
SpirvFunction *entryFunction;
/// Mapping of all Clang AST decls to their instruction pointers.
llvm::DenseMap<const ValueDecl *, DeclSpirvInfo> astDecls;
llvm::DenseMap<const ValueDecl *, SpirvFunction *> astFunctionDecls;
/// Vector of all defined stage variables.
llvm::SmallVector<StageVar, 8> stageVars;
/// Mapping from Clang AST decls to the corresponding stage variables.
/// This field is only used by GS for manually emitting vertices, when
/// we need to query the output stage variables involved in writing back. For
/// other cases, stage variable reading and writing is done at the time of
/// creating that stage variable, so that we don't need to query them again
/// for reading and writing.
llvm::DenseMap<const ValueDecl *, SpirvVariable *> stageVarInstructions;
/// Vector of all defined resource variables.
llvm::SmallVector<ResourceVar, 8> resourceVars;
/// Mapping from {RW|Append|Consume}StructuredBuffers to their
/// counter variables' (instr-ptr, is-alias-or-not) pairs
///
  /// counterVars holds entities of AssocCounter#1, fieldCounterVars holds
/// entities of the rest.
llvm::DenseMap<const DeclaratorDecl *, CounterIdAliasPair> counterVars;
llvm::DenseMap<const DeclaratorDecl *, CounterVarFields> fieldCounterVars;
/// Mapping from clang declarator to SPIR-V declaration instruction.
  /// This is used to defer creation of the counter for a RWStructuredBuffer
  /// until an Increment/DecrementCounter method is called on it.
llvm::DenseMap<const DeclaratorDecl *, SpirvInstruction *> declRWSBuffers;
/// The execution mode to use for rasterizer ordered views. Should be set to
/// PixelInterlockOrderedEXT (default), SampleInterlockOrderedEXT, or
/// ShadingRateInterlockOrderedEXT. This will be set based on which semantics
/// are present in input variables, and will be used to determine which
/// execution mode to attach to the entry point if it uses rasterizer ordered
/// views.
llvm::Optional<spv::ExecutionMode> interlockExecutionMode;
/// The SPIR-V builtin variables accessed by WaveGetLaneCount(),
/// WaveGetLaneIndex() and ray tracing builtins.
///
/// These are the only few cases where SPIR-V builtin variables are accessed
/// using HLSL intrinsic function calls. All other builtin variables are
/// accessed using stage IO variables.
llvm::DenseMap<uint32_t, SpirvVariable *> builtinToVarMap;
/// Maps from a raytracing stage variable to the entry point that variable is
/// for.
llvm::DenseMap<SpirvVariable *, SpirvFunction *>
rayTracingStageVarToEntryPoints;
/// Whether the translated SPIR-V binary needs legalization.
///
/// The following cases will require legalization:
///
/// 1. Opaque types (textures, samplers) within structs
/// 2. Structured buffer aliasing
  /// 3. Using SPIR-V instructions not allowed in the current shader stage
///
/// This covers the second case:
///
/// When we have a kind of structured or byte buffer, meaning one of the
/// following
///
/// * StructuredBuffer
/// * RWStructuredBuffer
/// * AppendStructuredBuffer
/// * ConsumeStructuredBuffer
  /// * ByteAddressBuffer
  /// * RWByteAddressBuffer
///
  /// and assigning to them (using operator=, passing them in as function
  /// parameters, or returning them as function return values), we need
  /// legalization.
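  ///
  /// For instance (an illustrative HLSL sketch; the names are made up):
  ///
  ///   RWStructuredBuffer<float4> gRWSBuffer;
  ///   float4 read(RWStructuredBuffer<float4> buffer) { return buffer[0]; }
  ///   // Calling read(gRWSBuffer) makes the parameter an alias of gRWSBuffer.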
///
/// All variable definitions (including static/non-static local/global
/// variables, function parameters/returns) will gain another level of
/// pointerness, unless they will generate externally visible SPIR-V
/// variables. So variables and parameters will be of pointer-to-pointer type,
/// while function returns will be of pointer type. We adopt this mechanism to
/// convey to the legalization passes that they are *alias* variables, and
/// all accesses should happen to the aliased-to-variables. Loading such an
/// alias variable will give the pointer to the aliased-to-variable, while
/// storing into such an alias variable should write the pointer to the
/// aliased-to-variable.
///
/// Based on the above, CodeGen should take care of the following AST nodes:
///
/// * Definition of alias variables: should add another level of pointers
/// * Assigning non-alias variables to alias variables: should avoid the load
/// over the non-alias variables
/// * Accessing alias variables: should load the pointer first and then
/// further compose access chains.
///
/// Note that the associated counters bring about their own complication.
/// We also need to apply the alias mechanism for them.
///
/// If this is true, SPIRV-Tools legalization passes will be executed after
/// the translation to legalize the generated SPIR-V binary.
///
/// Note: legalization specific code
bool needsLegalization;
/// Whether the translated SPIR-V binary needs flattening of composite
/// resources.
///
/// If the source HLSL contains global structure of resources, we need to run
/// an additional SPIR-V optimization pass to flatten such structures.
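  ///
  /// For example (an illustrative sketch):
  ///
  ///   struct Material { Texture2D tex; SamplerState samp; };
  ///   Material gMaterial; // a global structure of resources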
bool needsFlatteningCompositeResources;
uint32_t perspBaryCentricsIndex, noPerspBaryCentricsIndex;
llvm::SmallVector<const TypedefNameDecl *, 4> typeAliasesWithAttributes;
public:
/// The gl_PerVertex structs for both input and output
GlPerVertex glPerVertex;
};
hlsl::Semantic::Kind SemanticInfo::getKind() const {
assert(semantic);
return semantic->GetKind();
}
bool SemanticInfo::isTarget() const {
return semantic && semantic->GetKind() == hlsl::Semantic::Kind::Target;
}
void CounterIdAliasPair::assign(SpirvInstruction *src,
SpirvBuilder &builder) const {
assert(isAlias);
builder.createStore(counterVar, src, /* SourceLocation */ {});
}
DeclResultIdMapper::DeclResultIdMapper(ASTContext &context,
SpirvContext &spirvContext,
SpirvBuilder &spirvBuilder,
SpirvEmitter &emitter,
FeatureManager &features,
const SpirvCodeGenOptions &options)
: spvBuilder(spirvBuilder), theEmitter(emitter), featureManager(features),
spirvOptions(options), astContext(context), spvContext(spirvContext),
diags(context.getDiagnostics()), entryFunction(nullptr),
needsLegalization(false), needsFlatteningCompositeResources(false),
perspBaryCentricsIndex(2), noPerspBaryCentricsIndex(2),
glPerVertex(context, spirvContext, spirvBuilder) {}
bool DeclResultIdMapper::decorateStageIOLocations() {
if (spvContext.isRay() || spvContext.isAS()) {
// No location assignment for any raytracing stage variables or
// amplification shader variables
return true;
}
// Try both input and output even if input location assignment failed
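  // The casts to int and the bitwise & (rather than a logical &&) ensure the
  // second call is evaluated even if the first one fails.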
return (int)finalizeStageIOLocations(true) &
(int)finalizeStageIOLocations(false);
}
bool DeclResultIdMapper::isInputStorageClass(const StageVar &v) {
return getStorageClassForSigPoint(v.getSigPoint()) ==
spv::StorageClass::Input;
}
void DeclResultIdMapper::createFnParamCounterVar(const VarDecl *param) {
createCounterVarForDecl(param);
}
void DeclResultIdMapper::createFieldCounterVars(const DeclaratorDecl *decl) {
llvm::SmallVector<uint32_t, 4> indices;
createFieldCounterVars(decl, decl, &indices);
}
} // end namespace spirv
} // end namespace clang
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/CMakeLists.txt | set(LLVM_LINK_COMPONENTS
Support
)
add_clang_library(clangSPIRV
AlignmentSizeCalculator.cpp
AstTypeProbe.cpp
BlockReadableOrder.cpp
CapabilityVisitor.cpp
ConstEvaluator.cpp
DeclResultIdMapper.cpp
DebugTypeVisitor.cpp
EmitSpirvAction.cpp
EmitVisitor.cpp
FeatureManager.cpp
GlPerVertex.cpp
InitListHandler.cpp
LiteralTypeVisitor.cpp
LowerTypeVisitor.cpp
SortDebugInfoVisitor.cpp
NonUniformVisitor.cpp
PreciseVisitor.cpp
PervertexInputVisitor.cpp
RawBufferMethods.cpp
RelaxedPrecisionVisitor.cpp
RemoveBufferBlockVisitor.cpp
SpirvBasicBlock.cpp
SpirvBuilder.cpp
SpirvContext.cpp
SpirvEmitter.cpp
SpirvFunction.cpp
SpirvInstruction.cpp
SpirvModule.cpp
SpirvType.cpp
SignaturePackingUtil.cpp
String.cpp
LINK_LIBS
clangAST
clangBasic
clangFrontend
clangLex
SPIRV-Tools-opt
)
target_include_directories(clangSPIRV PUBLIC ${SPIRV_HEADER_INCLUDE_DIR})
target_include_directories(clangSPIRV PRIVATE ${SPIRV_TOOLS_INCLUDE_DIR})
add_dependencies(clangSPIRV TablegenHLSLOptions)
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/InitListHandler.h | //===------- InitListHandler.h - Initializer List Handler -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//
//
// This file defines an initializer list handler that takes in an InitListExpr
// and emits the corresponding SPIR-V instructions for it.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_SPIRV_INITLISTHANDLER_H
#define LLVM_CLANG_LIB_SPIRV_INITLISTHANDLER_H
#include <deque>
#include <utility>
#include <vector>
#include "clang/AST/Expr.h"
#include "clang/Basic/Diagnostic.h"
#include "SpirvEmitter.h"
namespace clang {
namespace spirv {
/// The class for handling initializer lists.
///
/// Initializer lists in HLSL are very flexible; as long as the initializer
/// list provides the exact number of elements required by the type of the
/// object to be initialized, it will very likely be accepted. To handle
/// such flexibility, composite extraction, recursive composite construction,
/// and proper casting is necessary for some cases. For example:
///
/// float4x4 mat = {scalar, vec1, vec2, vec3, vec2, float2(1, 2), 3, vec4};
/// The first element vector is composed of scalar, vec1, and vec2; the second
/// element vector is composed of vec3 and the first element of vec2; etc.
///
/// The AST is designed to omit the composite extraction and construction. It
/// also does not have casts to the expected types and from lvalues to rvalues.
///
/// Resulting from the above reasons, the logic for handling initializer lists
/// is complicated. So we have this dedicated class for it. It is built on
/// top of the SpirvEmitter class and calls into SpirvEmitter for normal
/// translation tasks. This gives better code structure.
///
/// The logic for handling initalizer lists is largely the following:
///
/// First we flatten() the given initializer list recursively and put all
/// SPIR-V instructions for non-initializer-list AST Exprs into the
/// initializers queue. This handles curly braces of even weird forms like
/// float2x2 mat = {{1.}, {2., {{3.}}}, 4.};
///
/// Then we construct the final SPIR-V composite from the initializer list
/// by traversing the type of the composite. This is done recursively in the
/// depth first search manner, using the type of the composite as the root.
///
/// When we reach a scalar type, we will try to decode a scalar value from the
/// front of the initializers queue. This may trigger composite extraction
/// since the front of the queue may be a vector/matrix. The leftover values
/// after the extraction should be retained for the next decoding. Thus, we need
/// another queue, scalars, to keep track of leftover unused scalar values.
/// To make proper use of leftovers, when decoding values for a given type, we
/// first try the scalars queue.
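///
/// For instance (an illustrative case): for
///   float2x2 m = {v3, s}; // v3 : float3, s : float
/// the first row consumes v3.x and v3.y; the leftover v3.z stays in the
/// scalars queue and is paired with s to form the second row.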
///
/// When we reach a composite type, we will try to construct a composite using
/// the scalar values previously extracted and retained in the scalars queue.
/// To optimize, if we have no leftover scalars and a value of the same type at
/// the front of the initializers queue, we use the value as a whole.
///
/// If the composite type is vector or matrix, we decompose() it into scalars as
/// explained above. If it is a struct or array type, the element type is not
/// guaranteed to be scalars. But still, we need to split them into their
/// elements. For such cases, we create OpCompositeExtract SPIR-V instructions
/// for all the elements and push them into the initializers queue.
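///
/// For example (an illustrative case): given
///   struct S { float2 a; float b; };
///   float3 v = {s}; // s : S
/// s is split with OpCompositeExtract into s.a and s.b, which are pushed back
/// into the initializers queue and then decomposed into scalars to build v.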
class InitListHandler {
public:
/// Constructs an InitListHandler which uses the given emitter for normal
/// translation tasks. It will reuse the ModuleBuilder embedded in the given
/// emitter.
InitListHandler(ASTContext &ctx, SpirvEmitter &emitter);
/// Processes the given InitListExpr and returns the <result-id> for the final
/// SPIR-V value.
SpirvInstruction *processInit(const InitListExpr *expr,
SourceRange rangeOverride = {});
/// Casts the given Expr to the given toType and returns the <result-id> for
/// the final SPIR-V value.
SpirvInstruction *processCast(QualType toType, const Expr *expr);
private:
/// \brief Wrapper method to create an error message and report it
/// in the diagnostic engine associated with this consumer.
template <unsigned N>
DiagnosticBuilder emitError(const char (&message)[N], SourceLocation loc) {
const auto diagId =
diags.getCustomDiagID(clang::DiagnosticsEngine::Error, message);
return diags.Report(loc, diagId);
}
/// Construct a SPIR-V instruction whose type is |type| using |initializers|
/// and returns the <result-id> for the final SPIR-V value of the given type.
SpirvInstruction *doProcess(QualType type, SourceLocation srcLoc,
SourceRange range = {});
/// Flattens the given InitListExpr and generates SPIR-V instructions for
/// all non-InitListExpr AST nodes. Puts those generated SPIR-V instructions
/// into |initializers|.
void flatten(const InitListExpr *expr);
/// Decomposes the given SpirvInstruction and puts all elements into the end
/// of the scalars queue.
void decompose(SpirvInstruction *inst, const SourceLocation &loc);
  /// If the next initializer is a struct, replaces it with OpCompositeExtract
  /// instructions for its members and returns true. Otherwise, does nothing
  /// and returns false.
bool tryToSplitStruct();
  /// If the next initializer is a constant array, replaces it with
  /// OpCompositeExtract instructions for all its members and returns true.
  /// Otherwise, does nothing and returns false.
bool tryToSplitConstantArray();
/// Emits the necessary SPIR-V instructions to create a SPIR-V value of the
/// given type. The scalars and initializers queue will be used to fetch the
/// next value.
SpirvInstruction *createInitForType(QualType type, SourceLocation,
SourceRange range = {});
SpirvInstruction *createInitForBuiltinType(QualType type, SourceLocation);
SpirvInstruction *createInitForVectorType(QualType elemType, uint32_t count,
SourceLocation,
SourceRange range = {});
SpirvInstruction *createInitForMatrixType(QualType matrixType, SourceLocation,
SourceRange range = {});
SpirvInstruction *createInitForStructType(QualType type, SourceLocation,
SourceRange range = {});
SpirvInstruction *createInitForConstantArrayType(QualType type,
SourceLocation,
SourceRange range = {});
SpirvInstruction *createInitForBufferOrImageType(QualType type,
SourceLocation);
private:
ASTContext &astContext;
SpirvEmitter &theEmitter;
SpirvBuilder &spvBuilder;
DiagnosticsEngine &diags;
  /// A queue keeping track of unused SPIR-V instructions for initializers.
  /// Since we will only consume initializers from the head of the queue and
  /// will not add new initializers to the tail of the queue, we use a vector
  /// (containing the reverse of the original initializer list) here and
  /// manipulate its tail. This is more efficient than using a deque.
std::vector<SpirvInstruction *> initializers;
/// A queue keeping track of previously extracted but unused scalars.
/// Each element is a pair, with the first element as the SPIR-V <result-id>
/// and the second element as the AST type of the scalar value.
std::deque<std::pair<SpirvInstruction *, QualType>> scalars;
};
} // end namespace spirv
} // end namespace clang
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/InitListHandler.cpp | //===------- InitListHandler.cpp - Initializer List Handler -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//
//
// This file implements an initializer list handler that takes in an
// InitListExpr and emits the corresponding SPIR-V instructions for it.
//
//===----------------------------------------------------------------------===//
#include "InitListHandler.h"
#include "clang/SPIRV/AstTypeProbe.h"
#include <algorithm>
#include <iterator>
#include "LowerTypeVisitor.h"
#include "llvm/ADT/SmallVector.h"
namespace clang {
namespace spirv {
InitListHandler::InitListHandler(ASTContext &ctx, SpirvEmitter &emitter)
: astContext(ctx), theEmitter(emitter),
spvBuilder(emitter.getSpirvBuilder()),
diags(emitter.getDiagnosticsEngine()) {}
SpirvInstruction *InitListHandler::processInit(const InitListExpr *expr,
SourceRange rangeOverride) {
initializers.clear();
scalars.clear();
flatten(expr);
// Reverse the whole initializer list so we can manipulate the list at the
// tail of the vector. This is more efficient than using a deque.
std::reverse(std::begin(initializers), std::end(initializers));
SourceRange range =
(rangeOverride != SourceRange()) ? rangeOverride : expr->getSourceRange();
return doProcess(expr->getType(), expr->getExprLoc(), range);
}
SpirvInstruction *InitListHandler::processCast(QualType toType,
const Expr *expr) {
initializers.clear();
scalars.clear();
auto *initializer = theEmitter.loadIfGLValue(expr);
if (initializer)
initializers.push_back(initializer);
return doProcess(toType, expr->getExprLoc());
}
SpirvInstruction *InitListHandler::doProcess(QualType type,
SourceLocation srcLoc,
SourceRange range) {
auto *init = createInitForType(type, srcLoc, range);
if (init) {
// For successful translation, we should have consumed all initializers and
// scalars extracted from them.
assert(initializers.empty());
assert(scalars.empty());
}
return init;
}
void InitListHandler::flatten(const InitListExpr *expr) {
const auto numInits = expr->getNumInits();
for (uint32_t i = 0; i < numInits; ++i) {
const Expr *init = expr->getInit(i);
if (const auto *subInitList = dyn_cast<InitListExpr>(init)) {
flatten(subInitList);
} else if (const auto *subInitList = dyn_cast<InitListExpr>(
// Ignore constructor casts which are no-ops
// For cases like: <type>(<initializer-list>)
init->IgnoreParenNoopCasts(theEmitter.getASTContext()))) {
flatten(subInitList);
} else {
auto *initializer = theEmitter.loadIfGLValue(init);
if (!initializer) {
initializers.clear();
return;
}
initializers.push_back(initializer);
}
}
}
// Note that we cannot use inst->getSourceLocation() for OpCompositeExtract.
// For example, float3(sign(v4f.xyz - 2 * v4f.xyz)) is InitListExpr and the
// result of "sign(v4f.xyz - 2 * v4f.xyz)" has its location as the start
// location of "v4f.xyz". When InitListHandler::decompose() handles inst
// for "sign(v4f.xyz - 2 * v4f.xyz)", inst->getSourceLocation() is the location
// of "v4f.xyz". However, we must use the start location of "sign(" for
// OpCompositeExtract.
void InitListHandler::decompose(SpirvInstruction *inst,
const SourceLocation &loc) {
const QualType type = inst->getAstResultType();
QualType elemType = {};
uint32_t elemCount = 0, rowCount = 0, colCount = 0;
// Scalar cases, including vec1 and mat1x1.
if (isScalarType(type, &elemType)) {
scalars.emplace_back(inst, elemType);
}
// Vector cases, including mat1xN and matNx1 where N > 1.
else if (isVectorType(type, &elemType, &elemCount)) {
for (uint32_t i = 0; i < elemCount; ++i) {
auto *element = spvBuilder.createCompositeExtract(
elemType, inst, {i}, loc, inst->getSourceRange());
scalars.emplace_back(element, elemType);
}
}
// MxN matrix cases, where M > 1 and N > 1.
else if (isMxNMatrix(type, &elemType, &rowCount, &colCount)) {
for (uint32_t i = 0; i < rowCount; ++i)
for (uint32_t j = 0; j < colCount; ++j) {
auto *element =
spvBuilder.createCompositeExtract(elemType, inst, {i, j}, loc);
scalars.emplace_back(element, elemType);
}
}
// The decompose method only supports scalar, vector, and matrix types.
else {
llvm_unreachable(
"decompose() should only handle scalar or vector or matrix types");
}
}
bool InitListHandler::tryToSplitStruct() {
if (initializers.empty())
return false;
auto *init = initializers.back();
if (!init)
return false;
const QualType initType = init->getAstResultType();
  if (!initType->isStructureType() ||
      // Sampler types will pass the above check but we cannot split them.
      isSampler(initType) ||
      // Cannot split a kind of structured or byte buffer.
      isAKindOfStructuredOrByteBuffer(initType))
return false;
  // We are certain the current initializer will be replaced by now.
initializers.pop_back();
const auto &loc = init->getSourceLocation();
const auto *structDecl = initType->getAsStructureType()->getDecl();
  // Create an OpCompositeExtract for each field of the struct
llvm::SmallVector<SpirvInstruction *, 4> fields;
uint32_t i = 0;
for (auto *field : structDecl->fields()) {
auto *extract =
spvBuilder.createCompositeExtract(field->getType(), init, {i}, loc);
extract->setLayoutRule(init->getLayoutRule());
fields.push_back(extract);
++i;
}
// Push in the reverse order
initializers.insert(initializers.end(), fields.rbegin(), fields.rend());
return true;
}
bool InitListHandler::tryToSplitConstantArray() {
if (initializers.empty())
return false;
auto *init = initializers.back();
if (!init)
return false;
const QualType initType = init->getAstResultType();
if (!initType->isConstantArrayType())
return false;
  // We are certain the current initializer will be replaced by now.
initializers.pop_back();
const auto &loc = init->getSourceLocation();
const auto &context = theEmitter.getASTContext();
const auto *arrayType = context.getAsConstantArrayType(initType);
const auto elemType = arrayType->getElementType();
// TODO: handle (unlikely) extra large array size?
const auto size = static_cast<uint32_t>(arrayType->getSize().getZExtValue());
  // Create an OpCompositeExtract for each element of the array
// TODO: It will generate lots of elements if the array size is very large.
// But do we have a better solution?
llvm::SmallVector<SpirvInstruction *, 4> elements;
for (uint32_t i = 0; i < size; ++i) {
auto *extract = spvBuilder.createCompositeExtract(elemType, init, {i}, loc);
elements.push_back(extract);
}
// Push in the reverse order
initializers.insert(initializers.end(), elements.rbegin(), elements.rend());
return true;
}
SpirvInstruction *InitListHandler::createInitForType(QualType type,
SourceLocation srcLoc,
SourceRange range) {
type = type.getCanonicalType();
if (type->isBuiltinType())
return createInitForBuiltinType(type, srcLoc);
QualType elemType = {};
uint32_t elemCount = 0;
if (isVectorType(type, &elemType, &elemCount))
return createInitForVectorType(elemType, elemCount, srcLoc, range);
  // This check handles vectors of size 1, for which isVectorType returns
  // false.
if (isScalarType(type, &elemType))
return createInitForVectorType(elemType, 1, srcLoc, range);
if (hlsl::IsHLSLMatType(type)) {
return createInitForMatrixType(type, srcLoc, range);
}
// Samplers, (RW)Buffers, (RW)Textures
// It is important that this happens before checking of structure types.
if (isOpaqueType(type))
return createInitForBufferOrImageType(type, srcLoc);
// This should happen before the check for normal struct types
if (isAKindOfStructuredOrByteBuffer(type)) {
return createInitForBufferOrImageType(type, srcLoc);
}
if (type->isStructureType())
return createInitForStructType(type, srcLoc, range);
if (type->isConstantArrayType())
return createInitForConstantArrayType(type, srcLoc, range);
emitError("initializer for type %0 unimplemented", srcLoc) << type;
return nullptr;
}
SpirvInstruction *
InitListHandler::createInitForBuiltinType(QualType type,
SourceLocation srcLoc) {
assert(type->isBuiltinType());
if (!scalars.empty()) {
const auto init = scalars.front();
scalars.pop_front();
return theEmitter.castToType(init.first, init.second, type, srcLoc);
}
// Keep splitting structs or arrays
while (tryToSplitStruct() || tryToSplitConstantArray())
;
if (initializers.empty()) {
return nullptr;
}
auto init = initializers.back();
initializers.pop_back();
if (!init->getAstResultType()->isBuiltinType()) {
decompose(init, srcLoc);
return createInitForBuiltinType(type, srcLoc);
}
return theEmitter.castToType(init, init->getAstResultType(), type, srcLoc);
}
SpirvInstruction *
InitListHandler::createInitForVectorType(QualType elemType, uint32_t count,
SourceLocation srcLoc,
SourceRange range) {
// If we don't have leftover scalars, we can try to see if there is a vector
// of the same size in the original initializer list so that we can use it
// directly. For all other cases, we need to construct a new vector as the
// initializer.
if (scalars.empty()) {
// Keep splitting structs or arrays
while (tryToSplitStruct() || tryToSplitConstantArray())
;
// Not enough elements in the initializer list. Giving up.
if (initializers.empty())
return nullptr;
auto init = initializers.back();
const auto initType = init->getAstResultType();
uint32_t elemCount = 0;
if (isVectorType(initType, nullptr, &elemCount) && elemCount == count) {
initializers.pop_back();
/// HLSL vector types are parameterized templates and we cannot
/// construct them. So we construct an ExtVectorType here instead.
/// This is unfortunate since it means we need to handle ExtVectorType
/// in all type casting methods in SpirvEmitter.
const auto toVecType =
theEmitter.getASTContext().getExtVectorType(elemType, count);
return theEmitter.castToType(init, initType, toVecType, srcLoc, range);
}
}
if (count == 1)
return createInitForBuiltinType(elemType, srcLoc);
llvm::SmallVector<SpirvInstruction *, 4> elements;
for (uint32_t i = 0; i < count; ++i) {
// All elements are scalars, which should already be casted to the correct
// type if necessary.
elements.push_back(createInitForBuiltinType(elemType, srcLoc));
}
const QualType vecType = astContext.getExtVectorType(elemType, count);
// TODO: use OpConstantComposite when all components are constants
return spvBuilder.createCompositeConstruct(vecType, elements, srcLoc, range);
}
SpirvInstruction *InitListHandler::createInitForMatrixType(
QualType matrixType, SourceLocation srcLoc, SourceRange range) {
uint32_t rowCount = 0, colCount = 0;
hlsl::GetHLSLMatRowColCount(matrixType, rowCount, colCount);
const QualType elemType = hlsl::GetHLSLMatElementType(matrixType);
// Same as the vector case, first try to see if we already have a matrix at
// the beginning of the initializer queue.
if (scalars.empty()) {
// Keep splitting structs or arrays
while (tryToSplitStruct() || tryToSplitConstantArray())
;
// Not enough elements in the initializer list. Giving up.
if (initializers.empty())
return nullptr;
auto init = initializers.back();
if (hlsl::IsHLSLMatType(init->getAstResultType())) {
uint32_t initRowCount = 0, initColCount = 0;
hlsl::GetHLSLMatRowColCount(init->getAstResultType(), initRowCount,
initColCount);
if (rowCount == initRowCount && colCount == initColCount) {
initializers.pop_back();
return theEmitter.castToType(init, init->getAstResultType(), matrixType,
srcLoc, range);
}
}
}
if (rowCount == 1)
return createInitForVectorType(elemType, colCount, srcLoc, range);
if (colCount == 1)
return createInitForVectorType(elemType, rowCount, srcLoc, range);
llvm::SmallVector<SpirvInstruction *, 4> vectors;
for (uint32_t i = 0; i < rowCount; ++i) {
// All elements are vectors, which should already be casted to the correct
// type if necessary.
vectors.push_back(
createInitForVectorType(elemType, colCount, srcLoc, range));
}
// TODO: use OpConstantComposite when all components are constants
return spvBuilder.createCompositeConstruct(matrixType, vectors, srcLoc,
range);
}
SpirvInstruction *
InitListHandler::createInitForStructType(QualType type, SourceLocation srcLoc,
SourceRange range) {
assert(type->isStructureType() && !isSampler(type));
// Same as the vector case, first try to see if we already have a struct at
// the beginning of the initializer queue.
if (scalars.empty()) {
// Keep splitting arrays
while (tryToSplitConstantArray())
;
// Note: an empty initializer list can be valid. Ex: initializing an
// empty struct.
if (!initializers.empty()) {
auto init = initializers.back();
// We can only avoid decomposing and reconstructing when the type is
// exactly the same.
if (type.getCanonicalType() ==
init->getAstResultType().getCanonicalType()) {
initializers.pop_back();
return init;
}
}
// Otherwise, if the next initializer is a struct, it is not of the same
// type as we expected. Split it. Just need to do one iteration since a
// field in the next struct initializer may be of the same struct type as
    // a field we are about to construct.
tryToSplitStruct();
}
const RecordType *recordType = type->getAs<RecordType>();
assert(recordType);
LowerTypeVisitor lowerTypeVisitor(astContext, theEmitter.getSpirvContext(),
theEmitter.getSpirvOptions(),
theEmitter.getSpirvBuilder());
const SpirvType *spirvType =
lowerTypeVisitor.lowerType(type, SpirvLayoutRule::Void, false, srcLoc);
llvm::SmallVector<SpirvInstruction *, 4> fields;
const StructType *structType = dyn_cast<StructType>(spirvType);
assert(structType != nullptr);
forEachSpirvField(
recordType, structType,
[this, &fields, srcLoc, range](size_t spirvFieldIndex,
const QualType &fieldType,
const StructType::FieldInfo &fieldInfo) {
SpirvInstruction *init = createInitForType(fieldType, srcLoc, range);
if (!init)
return false;
// For non bit-fields, `init` will be the value for the component.
if (!fieldInfo.bitfield.hasValue()) {
assert(fields.size() == fieldInfo.fieldIndex);
fields.push_back(init);
return true;
}
// For a bit fields we need to insert it into the container.
// The first time we see this bit field, init is used as the value.
// This assumes that 0 is the first offset in the bitfield.
if (fields.size() <= fieldInfo.fieldIndex) {
assert(fieldInfo.bitfield->offsetInBits == 0);
fields.push_back(init);
return true;
}
// For the remaining bitfields, we need to insert them into the existing
// container, which is the last element in `fields`.
assert(fields.size() == fieldInfo.fieldIndex + 1);
fields.back() = spvBuilder.createBitFieldInsert(
fieldType, fields.back(), init, fieldInfo.bitfield->offsetInBits,
fieldInfo.bitfield->sizeInBits, srcLoc, range);
return true;
},
true);
for (const auto *field : fields)
if (field == nullptr)
return nullptr;
return spvBuilder.createCompositeConstruct(type, fields, srcLoc, range);
}
SpirvInstruction *InitListHandler::createInitForConstantArrayType(
QualType type, SourceLocation srcLoc, SourceRange range) {
assert(type->isConstantArrayType());
// Same as the vector case, first try to see if we already have an array at
// the beginning of the initializer queue.
if (scalars.empty()) {
// Keep splitting structs
while (tryToSplitStruct())
;
// Not enough elements in the initializer list. Giving up.
if (initializers.empty())
return nullptr;
auto init = initializers.back();
// We can only avoid decomposing and reconstructing when the type is
// exactly the same.
if (type.getCanonicalType() ==
init->getAstResultType().getCanonicalType()) {
initializers.pop_back();
return init;
}
// Otherwise, if the next initializer is an array, it is not of the same
// type as we expected. Split it. Just need to do one iteration since the
// next array initializer may have the same element type as the one we
    // are about to construct but with a different size.
tryToSplitConstantArray();
}
const auto *arrType = theEmitter.getASTContext().getAsConstantArrayType(type);
const auto elemType = arrType->getElementType();
// TODO: handle (unlikely) extra large array size?
const auto size = static_cast<uint32_t>(arrType->getSize().getZExtValue());
llvm::SmallVector<SpirvInstruction *, 4> elements;
for (uint32_t i = 0; i < size; ++i) {
auto *it = createInitForType(elemType, srcLoc, range);
if (!it)
return nullptr;
elements.push_back(it);
}
// TODO: use OpConstantComposite when all components are constants
return spvBuilder.createCompositeConstruct(type, elements, srcLoc, range);
}
SpirvInstruction *
InitListHandler::createInitForBufferOrImageType(QualType type,
SourceLocation srcLoc) {
assert(isOpaqueType(type) || isAKindOfStructuredOrByteBuffer(type));
// Samplers, (RW)Buffers, and (RW)Textures are translated into OpTypeSampler
// and OpTypeImage. They should be treated similar as builtin types.
if (!scalars.empty()) {
const auto init = scalars.front();
scalars.pop_front();
// Require exact type match between the initializer and the target component
if (init.second.getCanonicalType() != type.getCanonicalType()) {
emitError("cannot cast initializer type %0 into variable type %1", srcLoc)
<< init.second << type;
return nullptr;
}
return init.first;
}
// Keep splitting structs or arrays
while (tryToSplitStruct() || tryToSplitConstantArray())
;
// Not enough elements in the initializer list. Giving up.
if (initializers.empty()) {
return nullptr;
}
auto init = initializers.back();
initializers.pop_back();
if (!init)
return nullptr;
if (init->getAstResultType().getCanonicalType() != type.getCanonicalType()) {
emitError("Cannot cast initializer type %0 into variable type %1", srcLoc)
<< init->getAstResultType() << type;
return nullptr;
}
return init;
}
} // end namespace spirv
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/EmitVisitor.h | //===-- EmitVisitor.h - Emit Visitor ----------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SPIRV_EMITVISITOR_H
#define LLVM_CLANG_SPIRV_EMITVISITOR_H
#include "clang/SPIRV/FeatureManager.h"
#include "clang/SPIRV/SpirvContext.h"
#include "clang/SPIRV/SpirvVisitor.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringMap.h"
#include <functional>
namespace clang {
namespace spirv {
class SpirvFunction;
class SpirvBasicBlock;
class SpirvType;
class EmitTypeHandler {
public:
struct DecorationInfo {
DecorationInfo(spv::Decoration decor, llvm::ArrayRef<uint32_t> params = {},
llvm::Optional<uint32_t> index = llvm::None)
: decoration(decor), decorationParams(params.begin(), params.end()),
memberIndex(index) {}
bool operator==(const DecorationInfo &other) const {
return decoration == other.decoration &&
decorationParams == other.decorationParams &&
memberIndex.hasValue() == other.memberIndex.hasValue() &&
(!memberIndex.hasValue() ||
memberIndex.getValue() == other.memberIndex.getValue());
}
spv::Decoration decoration;
llvm::SmallVector<uint32_t, 4> decorationParams;
llvm::Optional<uint32_t> memberIndex;
};
public:
EmitTypeHandler(ASTContext &astCtx, SpirvContext &spvContext,
const SpirvCodeGenOptions &opts, FeatureManager &featureMgr,
std::vector<uint32_t> *debugVec,
std::vector<uint32_t> *decVec,
std::vector<uint32_t> *typesVec,
const std::function<uint32_t()> &takeNextIdFn)
: astContext(astCtx), context(spvContext), featureManager(featureMgr),
debugVariableBinary(debugVec), annotationsBinary(decVec),
typeConstantBinary(typesVec), takeNextIdFunction(takeNextIdFn),
emittedConstantInts({}), emittedConstantFloats({}),
emittedConstantComposites({}), emittedConstantNulls({}),
emittedUndef({}), emittedConstantBools() {
assert(decVec);
assert(typesVec);
}
// Disable copy constructor/assignment.
EmitTypeHandler(const EmitTypeHandler &) = delete;
EmitTypeHandler &operator=(const EmitTypeHandler &) = delete;
// Emits the instruction for the given type into the typeConstantBinary and
// returns the result-id for the type. If the type has already been emitted,
// it only returns its result-id.
//
// If any names are associated with the type (or its members in case of
// structs), the OpName/OpMemberNames will also be emitted.
//
// If any decorations apply to the type, it also emits the decoration
// instructions into the annotationsBinary.
uint32_t emitType(const SpirvType *);
  // Emits OpDecorate (or OpMemberDecorate if a memberIndex is provided)
  // targeting the given type. Uses the given decoration kind and its
  // parameters.
void emitDecoration(uint32_t typeResultId, spv::Decoration,
llvm::ArrayRef<uint32_t> decorationParams,
llvm::Optional<uint32_t> memberIndex = llvm::None);
uint32_t getOrCreateConstant(SpirvConstant *);
// Emits an OpConstant instruction and returns its result-id.
// For non-specialization constants, if an identical constant has already been
// emitted, returns the existing constant's result-id.
//
  // Note 1: This method modifies the curTypeInst. Do not call in the middle of
// construction of another instruction.
//
// Note 2: Integer constants may need to be generated for cases where there is
// no SpirvConstantInteger instruction in the module. For example, we need to
// emit an integer in order to create an array type. Therefore,
// 'getOrCreateConstantInt' has a different signature than others. If a
// constant instruction is provided, and it already has a result-id assigned,
// it will be used. Otherwise a new result-id will be allocated for the
// instruction.
uint32_t
getOrCreateConstantInt(llvm::APInt value, const SpirvType *type,
bool isSpecConst,
SpirvInstruction *constantInstruction = nullptr);
uint32_t getOrCreateConstantFloat(SpirvConstantFloat *);
uint32_t getOrCreateConstantComposite(SpirvConstantComposite *);
uint32_t getOrCreateConstantNull(SpirvConstantNull *);
uint32_t getOrCreateUndef(SpirvUndef *);
uint32_t getOrCreateConstantBool(SpirvConstantBoolean *);
template <typename vecType>
void emitLiteral(const SpirvConstant *, vecType &outInst);
template <typename vecType>
void emitFloatLiteral(const SpirvConstantFloat *, vecType &outInst);
template <typename vecType>
void emitIntLiteral(const SpirvConstantInteger *, vecType &outInst);
template <typename vecType>
void emitIntLiteral(const llvm::APInt &literalVal, vecType &outInst);
private:
void initTypeInstruction(spv::Op op);
void finalizeTypeInstruction();
  // Returns the result-id for the given type and decorations. If a type with
  // the same decorations has already been used, it returns the existing
  // result-id. If not, creates a new result-id for such a type and returns it.
uint32_t getResultIdForType(const SpirvType *, bool *alreadyExists);
// Emits an OpName (if memberIndex is not provided) or OpMemberName (if
// memberIndex is provided) for the given target result-id.
void emitNameForType(llvm::StringRef name, uint32_t targetTypeId,
llvm::Optional<uint32_t> memberIndex = llvm::None);
  // There is no guarantee that an instruction, a function, or a basic block
  // has been assigned a result-id. This method returns the result-id for the
  // given object. If a result-id has not been assigned yet, it'll assign
  // one and return it.
template <class T> uint32_t getOrAssignResultId(T *obj) {
if (!obj->getResultId()) {
obj->setResultId(takeNextIdFunction());
}
return obj->getResultId();
}
private:
/// Emits error to the diagnostic engine associated with this visitor.
template <unsigned N>
DiagnosticBuilder emitError(const char (&message)[N],
SourceLocation loc = {}) {
const auto diagId = astContext.getDiagnostics().getCustomDiagID(
clang::DiagnosticsEngine::Error, message);
return astContext.getDiagnostics().Report(loc, diagId);
}
private:
ASTContext &astContext;
SpirvContext &context;
FeatureManager featureManager;
std::vector<uint32_t> curTypeInst;
std::vector<uint32_t> curDecorationInst;
std::vector<uint32_t> *debugVariableBinary;
std::vector<uint32_t> *annotationsBinary;
std::vector<uint32_t> *typeConstantBinary;
std::function<uint32_t()> takeNextIdFunction;
// The array type requires the result-id of an OpConstant for its length. In
// order to avoid duplicate OpConstant instructions, we keep a map of constant
// uint value to the result-id of the OpConstant for that value.
llvm::DenseMap<std::pair<uint64_t, const SpirvType *>, uint32_t>
emittedConstantInts;
llvm::DenseMap<std::pair<uint64_t, const SpirvType *>, uint32_t>
emittedConstantFloats;
llvm::SmallVector<SpirvConstantComposite *, 8> emittedConstantComposites;
llvm::SmallVector<SpirvConstantNull *, 8> emittedConstantNulls;
llvm::SmallVector<SpirvUndef *, 8> emittedUndef;
SpirvConstantBoolean *emittedConstantBools[2];
llvm::DenseSet<const SpirvInstruction *> emittedSpecConstantInstructions;
// emittedTypes is a map that caches the result-id of types in order to avoid
// emitting an identical type multiple times.
llvm::DenseMap<const SpirvType *, uint32_t> emittedTypes;
};
/// \brief The visitor class that emits the SPIR-V words from the in-memory
/// representation.
class EmitVisitor : public Visitor {
public:
/// \brief The struct representing a SPIR-V module header.
struct Header {
    /// \brief Constructs a SPIR-V module header with the given id bound and
    /// SPIR-V version.
Header(uint32_t bound, uint32_t version);
    /// \brief Returns all the SPIR-V words for this header.
std::vector<uint32_t> takeBinary();
const uint32_t magicNumber;
uint32_t version;
const uint32_t generator;
uint32_t bound;
const uint32_t reserved;
};
public:
EmitVisitor(ASTContext &astCtx, SpirvContext &spvCtx,
const SpirvCodeGenOptions &opts, FeatureManager &featureMgr)
: Visitor(opts, spvCtx), astContext(astCtx), featureManager(featureMgr),
id(0),
typeHandler(astCtx, spvCtx, opts, featureMgr, &debugVariableBinary,
&annotationsBinary, &typeConstantBinary,
[this]() -> uint32_t { return takeNextId(); }),
debugMainFileId(0), debugInfoExtInstId(0), debugLineStart(0),
debugLineEnd(0), debugColumnStart(0), debugColumnEnd(0),
lastOpWasMergeInst(false), inEntryFunctionWrapper(false),
hlslVersion(0) {}
~EmitVisitor();
// Visit different SPIR-V constructs for emitting.
bool visit(SpirvModule *, Phase phase) override;
bool visit(SpirvFunction *, Phase phase) override;
bool visit(SpirvBasicBlock *, Phase phase) override;
bool visit(SpirvCapability *) override;
bool visit(SpirvExtension *) override;
bool visit(SpirvExtInstImport *) override;
bool visit(SpirvMemoryModel *) override;
bool visit(SpirvEmitVertex *) override;
bool visit(SpirvEndPrimitive *) override;
bool visit(SpirvEntryPoint *) override;
bool visit(SpirvExecutionMode *) override;
bool visit(SpirvString *) override;
bool visit(SpirvSource *) override;
bool visit(SpirvModuleProcessed *) override;
bool visit(SpirvDecoration *) override;
bool visit(SpirvVariable *) override;
bool visit(SpirvFunctionParameter *) override;
bool visit(SpirvLoopMerge *) override;
bool visit(SpirvSelectionMerge *) override;
bool visit(SpirvBranch *) override;
bool visit(SpirvBranchConditional *) override;
bool visit(SpirvKill *) override;
bool visit(SpirvReturn *) override;
bool visit(SpirvSwitch *) override;
bool visit(SpirvUnreachable *) override;
bool visit(SpirvAccessChain *) override;
bool visit(SpirvAtomic *) override;
bool visit(SpirvBarrier *) override;
bool visit(SpirvBinaryOp *) override;
bool visit(SpirvBitFieldExtract *) override;
bool visit(SpirvBitFieldInsert *) override;
bool visit(SpirvConstantBoolean *) override;
bool visit(SpirvConstantInteger *) override;
bool visit(SpirvConstantFloat *) override;
bool visit(SpirvConstantComposite *) override;
bool visit(SpirvConstantNull *) override;
bool visit(SpirvUndef *) override;
bool visit(SpirvCompositeConstruct *) override;
bool visit(SpirvCompositeExtract *) override;
bool visit(SpirvCompositeInsert *) override;
bool visit(SpirvExtInst *) override;
bool visit(SpirvFunctionCall *) override;
bool visit(SpirvGroupNonUniformOp *) override;
bool visit(SpirvImageOp *) override;
bool visit(SpirvImageQuery *) override;
bool visit(SpirvImageSparseTexelsResident *) override;
bool visit(SpirvImageTexelPointer *) override;
bool visit(SpirvLoad *) override;
bool visit(SpirvCopyObject *) override;
bool visit(SpirvSampledImage *) override;
bool visit(SpirvSelect *) override;
bool visit(SpirvSpecConstantBinaryOp *) override;
bool visit(SpirvSpecConstantUnaryOp *) override;
bool visit(SpirvStore *) override;
bool visit(SpirvNullaryOp *) override;
bool visit(SpirvUnaryOp *) override;
bool visit(SpirvVectorShuffle *) override;
bool visit(SpirvArrayLength *) override;
bool visit(SpirvRayTracingOpNV *) override;
bool visit(SpirvDemoteToHelperInvocation *) override;
bool visit(SpirvIsHelperInvocationEXT *) override;
bool visit(SpirvRayQueryOpKHR *) override;
bool visit(SpirvReadClock *) override;
bool visit(SpirvRayTracingTerminateOpKHR *) override;
bool visit(SpirvDebugInfoNone *) override;
bool visit(SpirvDebugSource *) override;
bool visit(SpirvDebugCompilationUnit *) override;
bool visit(SpirvDebugLexicalBlock *) override;
bool visit(SpirvDebugScope *) override;
bool visit(SpirvDebugFunctionDeclaration *) override;
bool visit(SpirvDebugFunction *) override;
bool visit(SpirvDebugFunctionDefinition *) override;
bool visit(SpirvDebugEntryPoint *) override;
bool visit(SpirvDebugLocalVariable *) override;
bool visit(SpirvDebugDeclare *) override;
bool visit(SpirvDebugGlobalVariable *) override;
bool visit(SpirvDebugExpression *) override;
bool visit(SpirvDebugTypeBasic *) override;
bool visit(SpirvDebugTypeVector *) override;
bool visit(SpirvDebugTypeMatrix *) override;
bool visit(SpirvDebugTypeArray *) override;
bool visit(SpirvDebugTypeFunction *) override;
bool visit(SpirvDebugTypeComposite *) override;
bool visit(SpirvDebugTypeMember *) override;
bool visit(SpirvDebugTypeTemplate *) override;
bool visit(SpirvDebugTypeTemplateParameter *) override;
bool visit(SpirvIntrinsicInstruction *) override;
bool visit(SpirvEmitMeshTasksEXT *) override;
bool visit(SpirvSetMeshOutputsEXT *) override;
using Visitor::visit;
// Returns the assembled binary built up in this visitor.
std::vector<uint32_t> takeBinary();
private:
// Returns the next available result-id.
uint32_t takeNextId() { return ++id; }
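  // Note: because takeNextId() pre-increments, the first id handed out is 1;
  // id 0 is not a valid SPIR-V result id, which lets getOrAssignResultId
  // below treat a zero result id as "not yet assigned".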
  // There is no guarantee that an instruction, a function, or a basic block
  // has been assigned a result-id. This method returns the result-id for the
  // given object. If a result-id has not been assigned yet, it'll assign
  // one and return it.
template <class T> uint32_t getOrAssignResultId(T *obj) {
if (!obj->getResultId()) {
obj->setResultId(takeNextId());
}
return obj->getResultId();
}
  /// If we have already created an OpString for |str|, returns the id of the
  /// existing one. Otherwise, creates it, caches it in stringIdMap, and
  /// returns its id.
uint32_t getOrCreateOpStringId(llvm::StringRef str);
  // Generates a DebugSource instruction for |inst|.
  void generateDebugSource(uint32_t fileId, uint32_t textId,
                           SpirvDebugSource *inst);
  // Generates a DebugSourceContinued instruction for |inst|.
  void generateDebugSourceContinued(uint32_t textId, SpirvDebugSource *inst);
  /// Generates DebugSource and DebugSourceContinued instructions for |inst|
  /// using the previously generated |fileId|, chopping the source text into
  /// pieces as needed.
void generateChoppedSource(uint32_t fileId, SpirvDebugSource *inst);
  /// In the OpenCL.DebugInfo.100 spec some parameters are literals, whereas in
  /// the NonSemantic.Shader.DebugInfo.100 spec they are encoded as constant
  /// operands. This function checks which flavor we are emitting and returns
  /// either the literal directly or the id of a constant.
uint32_t getLiteralEncodedForDebugInfo(uint32_t val);
// Emits an OpLine instruction for the given operation into the given binary
// section.
void emitDebugLine(spv::Op op, const SourceLocation &loc,
const SourceRange &range, std::vector<uint32_t> *section,
bool isDebugScope = false);
// Initiates the creation of a new instruction with the given Opcode.
void initInstruction(spv::Op, const SourceLocation &);
// Initiates the creation of the given SPIR-V instruction.
// If the given instruction has a return type, it will also trigger emitting
// the necessary type (and its associated decorations) and uses its result-id
// in the instruction.
void initInstruction(SpirvInstruction *);
// Finalizes the current instruction by encoding the instruction size into the
// first word, and then appends the current instruction to the given SPIR-V
// binary section.
void finalizeInstruction(std::vector<uint32_t> *section);
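  // A minimal sketch of the finalization step (assumption: initInstruction
  // seeded curInst[0] with the opcode). Per the SPIR-V spec, the first word
  // of every instruction packs the word count in its high 16 bits and the
  // opcode in its low 16 bits:
  //   curInst[0] |= static_cast<uint32_t>(curInst.size()) << 16;
  //   section->insert(section->end(), curInst.begin(), curInst.end());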
// Encodes the given string into the current instruction that is being built.
void encodeString(llvm::StringRef value);
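  // Per the SPIR-V spec, a literal string is encoded as its UTF-8 bytes
  // packed four per word (little-endian within each word), followed by a null
  // terminator and zero padding out to the word boundary; e.g. "abc" encodes
  // as the single word 0x00636261.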
// Emits an OpName instruction into the debugBinary for the given target.
void emitDebugNameForInstruction(uint32_t resultId, llvm::StringRef name);
// TODO: Add a method for adding OpMemberName instructions for struct members
// using the type information.
// Returns the SPIR-V result id of the OpString for the File operand of
// OpSource instruction.
uint32_t getSourceFileId(SpirvSource *inst) {
uint32_t fileId = debugMainFileId;
if (inst->hasFile()) {
fileId = getOrCreateOpStringId(inst->getFile()->getString());
}
return fileId;
}
// Returns true if we already emitted the OpSource instruction whose File
// operand is |fileId|.
  bool isSourceWithFileEmitted(uint32_t fileId) {
    // Use lookup() rather than operator[] so that a query does not insert a
    // default entry into the map.
    return emittedSource.lookup(fileId) != 0;
  }
  // Maps the file id of an OpSource instruction to the result id of its
  // corresponding DebugSource instruction.
void setFileOfSourceToDebugSourceId(uint32_t fileId, uint32_t dbg_src_id) {
emittedSource[fileId] = dbg_src_id;
}
private:
/// Emits error to the diagnostic engine associated with this visitor.
template <unsigned N>
DiagnosticBuilder emitError(const char (&message)[N],
SourceLocation loc = {}) {
const auto diagId = astContext.getDiagnostics().getCustomDiagID(
clang::DiagnosticsEngine::Error, message);
return astContext.getDiagnostics().Report(loc, diagId);
}
private:
// Object that holds Clang AST nodes.
ASTContext &astContext;
// Feature manager.
FeatureManager featureManager;
// The last result-id that's been used so far.
uint32_t id;
// Handler for emitting types and their related instructions.
EmitTypeHandler typeHandler;
// Current instruction being built
SmallVector<uint32_t, 16> curInst;
// All preamble instructions in the following order:
// OpCapability, OpExtension, OpExtInstImport, OpMemoryModel, OpEntryPoint,
// OpExecutionMode(Id)
std::vector<uint32_t> preambleBinary;
// Debug instructions related to file. Includes:
// OpString, OpSourceExtension, OpSource, OpSourceContinued
std::vector<uint32_t> debugFileBinary;
// All debug instructions related to variable name. Includes:
// OpName, OpMemberName, OpModuleProcessed
std::vector<uint32_t> debugVariableBinary;
// All annotation instructions: OpDecorate, OpMemberDecorate, OpGroupDecorate,
// OpGroupMemberDecorate, and OpDecorationGroup.
std::vector<uint32_t> annotationsBinary;
// All type and constant instructions
std::vector<uint32_t> typeConstantBinary;
// All global variable declarations (all OpVariable instructions whose Storage
// Class is not Function)
std::vector<uint32_t> globalVarsBinary;
// All Rich Debug Info instructions
std::vector<uint32_t> richDebugInfo;
// All other instructions
std::vector<uint32_t> mainBinary;
// String literals to SpirvString objects
llvm::StringMap<uint32_t> stringIdMap;
// Main file information for debugging that will be used by OpLine.
uint32_t debugMainFileId;
  // Id of the Vulkan DebugInfo extended instruction set. Used when generating
  // Debug[No]Line instructions.
uint32_t debugInfoExtInstId;
// One HLSL source line may result in several SPIR-V instructions. In order
// to avoid emitting debug line instructions with identical line and column
// numbers, we record the last line and column numbers that were used in a
// debug line op, and only emit a new debug line op when a new line/column
// in the source is discovered.
uint32_t debugLineStart;
uint32_t debugLineEnd;
uint32_t debugColumnStart;
uint32_t debugColumnEnd;
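  // Illustrative sketch (not the verbatim logic) of how the cached window is
  // used: a new debug line op is emitted only when the incoming position
  // falls outside the last recorded range.
  //   bool needNewOp = line < debugLineStart || line > debugLineEnd ||
  //                    column < debugColumnStart || column > debugColumnEnd;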
// True if the last emitted instruction was OpSelectionMerge or OpLoopMerge.
bool lastOpWasMergeInst;
  // True if we are currently emitting code inside an entry function wrapper.
bool inEntryFunctionWrapper;
  // Map from the OpString id of a file name to the id of its DebugSource
  // instruction. When an OpSource instruction is generated instead (it has no
  // result id), we store 1 to remember that it was emitted.
llvm::DenseMap<uint32_t, uint32_t> emittedSource;
uint32_t hlslVersion;
// Vector to contain SpirvInstruction objects created by this class. The
// destructor of this class will release them.
std::vector<SpirvInstruction *> spvInstructions;
};
} // namespace spirv
} // namespace clang
#endif // LLVM_CLANG_SPIRV_EMITVISITOR_H
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/SpirvInstruction.cpp | //===- SpirvInstruction.cpp - SPIR-V Instruction Representation -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//
//
// This file implements the in-memory representation of SPIR-V instructions.
//
//===----------------------------------------------------------------------===//
#include "clang/SPIRV/SpirvInstruction.h"
#include "clang/SPIRV/BitwiseCast.h"
#include "clang/SPIRV/SpirvBasicBlock.h"
#include "clang/SPIRV/SpirvFunction.h"
#include "clang/SPIRV/SpirvType.h"
#include "clang/SPIRV/SpirvVisitor.h"
#include "clang/SPIRV/String.h"
namespace clang {
namespace spirv {
#define DEFINE_INVOKE_VISITOR_FOR_CLASS(cls) \
bool cls::invokeVisitor(Visitor *v) { return v->visit(this); }
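// For example, DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvCapability) below expands
// to:
//   bool SpirvCapability::invokeVisitor(Visitor *v) { return v->visit(this); }
// so each subclass dispatches to the Visitor::visit overload taking a pointer
// to its own static type.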
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvCapability)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvExtension)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvExtInstImport)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvMemoryModel)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvEntryPoint)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvExecutionMode)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvString)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvSource)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvModuleProcessed)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvDecoration)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvVariable)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvFunctionParameter)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvLoopMerge)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvSelectionMerge)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvBranch)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvBranchConditional)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvKill)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvReturn)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvSwitch)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvUnreachable)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvAccessChain)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvAtomic)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvBarrier)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvBinaryOp)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvBitFieldExtract)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvBitFieldInsert)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvConstantBoolean)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvConstantInteger)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvConstantFloat)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvConstantComposite)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvConstantNull)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvUndef)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvCompositeConstruct)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvCompositeExtract)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvCompositeInsert)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvEmitVertex)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvEndPrimitive)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvExtInst)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvFunctionCall)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvGroupNonUniformOp)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvImageOp)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvImageQuery)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvImageSparseTexelsResident)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvImageTexelPointer)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvLoad)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvCopyObject)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvSampledImage)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvSelect)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvSpecConstantBinaryOp)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvSpecConstantUnaryOp)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvStore)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvNullaryOp)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvUnaryOp)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvVectorShuffle)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvArrayLength)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvRayTracingOpNV)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvDemoteToHelperInvocation)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvIsHelperInvocationEXT)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvDebugInfoNone)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvDebugSource)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvDebugCompilationUnit)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvDebugFunctionDeclaration)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvDebugFunction)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvDebugFunctionDefinition)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvDebugEntryPoint)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvDebugLocalVariable)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvDebugGlobalVariable)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvDebugOperation)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvDebugExpression)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvDebugDeclare)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvDebugLexicalBlock)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvDebugScope)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvDebugTypeBasic)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvDebugTypeArray)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvDebugTypeVector)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvDebugTypeMatrix)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvDebugTypeFunction)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvDebugTypeComposite)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvDebugTypeMember)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvDebugTypeTemplate)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvDebugTypeTemplateParameter)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvRayQueryOpKHR)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvReadClock)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvRayTracingTerminateOpKHR)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvIntrinsicInstruction)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvEmitMeshTasksEXT)
DEFINE_INVOKE_VISITOR_FOR_CLASS(SpirvSetMeshOutputsEXT)
#undef DEFINE_INVOKE_VISITOR_FOR_CLASS
SpirvInstruction::SpirvInstruction(Kind k, spv::Op op, QualType astType,
SourceLocation loc, SourceRange range)
: kind(k), opcode(op), astResultType(astType), resultId(0), srcLoc(loc),
srcRange(range), debugName(), resultType(nullptr), resultTypeId(0),
layoutRule(SpirvLayoutRule::Void), containsAlias(false),
storageClass(spv::StorageClass::Function), isRValue_(false),
isRelaxedPrecision_(false), isNonUniform_(false), isPrecise_(false),
isNoninterpolated_(false), isRasterizerOrdered_(false) {}
bool SpirvInstruction::isArithmeticInstruction() const {
switch (opcode) {
case spv::Op::OpSNegate:
case spv::Op::OpFNegate:
case spv::Op::OpIAdd:
case spv::Op::OpFAdd:
case spv::Op::OpISub:
case spv::Op::OpFSub:
case spv::Op::OpIMul:
case spv::Op::OpFMul:
case spv::Op::OpUDiv:
case spv::Op::OpSDiv:
case spv::Op::OpFDiv:
case spv::Op::OpUMod:
case spv::Op::OpSRem:
case spv::Op::OpSMod:
case spv::Op::OpFRem:
case spv::Op::OpFMod:
case spv::Op::OpVectorTimesScalar:
case spv::Op::OpMatrixTimesScalar:
case spv::Op::OpVectorTimesMatrix:
case spv::Op::OpMatrixTimesVector:
case spv::Op::OpMatrixTimesMatrix:
case spv::Op::OpOuterProduct:
case spv::Op::OpDot:
case spv::Op::OpIAddCarry:
case spv::Op::OpISubBorrow:
case spv::Op::OpUMulExtended:
case spv::Op::OpSMulExtended:
return true;
default:
return false;
}
}
SpirvCapability::SpirvCapability(SourceLocation loc, spv::Capability cap)
: SpirvInstruction(IK_Capability, spv::Op::OpCapability, QualType(), loc),
capability(cap) {}
bool SpirvCapability::operator==(const SpirvCapability &that) const {
return capability == that.capability;
}
SpirvExtension::SpirvExtension(SourceLocation loc,
llvm::StringRef extensionName)
: SpirvInstruction(IK_Extension, spv::Op::OpExtension, QualType(), loc),
extName(extensionName) {}
bool SpirvExtension::operator==(const SpirvExtension &that) const {
return extName == that.extName;
}
SpirvExtInstImport::SpirvExtInstImport(SourceLocation loc,
llvm::StringRef extensionName)
: SpirvInstruction(IK_ExtInstImport, spv::Op::OpExtInstImport, QualType(),
loc),
extName(extensionName) {}
SpirvMemoryModel::SpirvMemoryModel(spv::AddressingModel addrModel,
spv::MemoryModel memModel)
: SpirvInstruction(IK_MemoryModel, spv::Op::OpMemoryModel, QualType(),
/*SrcLoc*/ {}),
addressModel(addrModel), memoryModel(memModel) {}
SpirvEntryPoint::SpirvEntryPoint(SourceLocation loc,
spv::ExecutionModel executionModel,
SpirvFunction *entryPointFn,
llvm::StringRef nameStr,
llvm::ArrayRef<SpirvVariable *> iface)
: SpirvInstruction(IK_EntryPoint, spv::Op::OpEntryPoint, QualType(), loc),
execModel(executionModel), entryPoint(entryPointFn), name(nameStr),
interfaceVec(iface.begin(), iface.end()) {}
// OpExecutionMode and OpExecutionModeId instructions
SpirvExecutionMode::SpirvExecutionMode(SourceLocation loc, SpirvFunction *entry,
spv::ExecutionMode em,
llvm::ArrayRef<uint32_t> paramsVec,
bool usesIdParams)
: SpirvInstruction(IK_ExecutionMode,
usesIdParams ? spv::Op::OpExecutionModeId
: spv::Op::OpExecutionMode,
QualType(), loc),
entryPoint(entry), execMode(em),
params(paramsVec.begin(), paramsVec.end()) {}
SpirvString::SpirvString(SourceLocation loc, llvm::StringRef stringLiteral)
: SpirvInstruction(IK_String, spv::Op::OpString, QualType(), loc),
str(stringLiteral) {}
SpirvSource::SpirvSource(SourceLocation loc, spv::SourceLanguage language,
uint32_t ver, SpirvString *fileString,
llvm::StringRef src)
: SpirvInstruction(IK_Source, spv::Op::OpSource, QualType(), loc),
lang(language), version(ver), file(fileString), source(src) {}
SpirvModuleProcessed::SpirvModuleProcessed(SourceLocation loc,
llvm::StringRef processStr)
: SpirvInstruction(IK_ModuleProcessed, spv::Op::OpModuleProcessed,
QualType(), loc),
process(processStr) {}
SpirvDecoration::SpirvDecoration(SourceLocation loc,
SpirvInstruction *targetInst,
spv::Decoration decor,
llvm::ArrayRef<uint32_t> p,
llvm::Optional<uint32_t> idx)
: SpirvInstruction(IK_Decoration, getDecorateOpcode(decor, idx),
/*type*/ {}, loc),
target(targetInst), targetFunction(nullptr), decoration(decor),
index(idx), params(p.begin(), p.end()), idParams() {}
SpirvDecoration::SpirvDecoration(SourceLocation loc,
SpirvInstruction *targetInst,
spv::Decoration decor,
llvm::ArrayRef<llvm::StringRef> strParams,
llvm::Optional<uint32_t> idx)
: SpirvInstruction(IK_Decoration, getDecorateStringOpcode(idx.hasValue()),
/*type*/ {}, loc),
target(targetInst), targetFunction(nullptr), decoration(decor),
index(idx), params(), idParams() {
for (llvm::StringRef str : strParams) {
const auto &stringWords = string::encodeSPIRVString(str);
params.insert(params.end(), stringWords.begin(), stringWords.end());
}
}
SpirvDecoration::SpirvDecoration(SourceLocation loc,
SpirvInstruction *targetInst,
spv::Decoration decor,
llvm::ArrayRef<SpirvInstruction *> ids)
: SpirvInstruction(IK_Decoration, spv::Op::OpDecorateId,
/*type*/ {}, loc),
target(targetInst), targetFunction(nullptr), decoration(decor),
index(llvm::None), params(), idParams(ids.begin(), ids.end()) {}
SpirvDecoration::SpirvDecoration(SourceLocation loc, SpirvFunction *targetFunc,
spv::Decoration decor,
llvm::ArrayRef<uint32_t> p)
: SpirvInstruction(IK_Decoration, spv::Op::OpDecorate,
/*type*/ {}, loc),
target(nullptr), targetFunction(targetFunc), decoration(decor),
index(llvm::None), params(p.begin(), p.end()), idParams() {}
spv::Op SpirvDecoration::getDecorateOpcode(
spv::Decoration decoration, const llvm::Optional<uint32_t> &memberIndex) {
if (decoration == spv::Decoration::HlslSemanticGOOGLE ||
decoration == spv::Decoration::UserTypeGOOGLE)
return memberIndex.hasValue() ? spv::Op::OpMemberDecorateStringGOOGLE
: spv::Op::OpDecorateStringGOOGLE;
return memberIndex.hasValue() ? spv::Op::OpMemberDecorate
: spv::Op::OpDecorate;
}
spv::Op SpirvDecoration::getDecorateStringOpcode(bool isMemberDecoration) {
return isMemberDecoration ? spv::Op::OpMemberDecorateString
: spv::Op::OpDecorateString;
}
bool SpirvDecoration::operator==(const SpirvDecoration &that) const {
return target == that.target && decoration == that.decoration &&
params == that.params && idParams == that.idParams &&
index.hasValue() == that.index.hasValue() &&
(!index.hasValue() || index.getValue() == that.index.getValue());
}
SpirvVariable::SpirvVariable(QualType resultType, SourceLocation loc,
spv::StorageClass sc, bool precise,
bool isNointerp, SpirvInstruction *initializerInst)
: SpirvInstruction(IK_Variable, spv::Op::OpVariable, resultType, loc),
initializer(initializerInst), descriptorSet(-1), binding(-1),
hlslUserType("") {
setStorageClass(sc);
setPrecise(precise);
setNoninterpolated(isNointerp);
}
SpirvVariable::SpirvVariable(const SpirvType *spvType, SourceLocation loc,
spv::StorageClass sc, bool precise,
bool isNointerp, SpirvInstruction *initializerInst)
: SpirvInstruction(IK_Variable, spv::Op::OpVariable, QualType(), loc),
initializer(initializerInst), descriptorSet(-1), binding(-1),
hlslUserType("") {
setResultType(spvType);
setStorageClass(sc);
setPrecise(precise);
setNoninterpolated(isNointerp);
}
SpirvFunctionParameter::SpirvFunctionParameter(QualType resultType,
bool isPrecise, bool isNointerp,
SourceLocation loc)
: SpirvInstruction(IK_FunctionParameter, spv::Op::OpFunctionParameter,
resultType, loc) {
setPrecise(isPrecise);
setNoninterpolated(isNointerp);
}
SpirvFunctionParameter::SpirvFunctionParameter(const SpirvType *spvType,
bool isPrecise, bool isNointerp,
SourceLocation loc)
: SpirvInstruction(IK_FunctionParameter, spv::Op::OpFunctionParameter,
QualType(), loc) {
setResultType(spvType);
setPrecise(isPrecise);
setNoninterpolated(isNointerp);
}
SpirvMerge::SpirvMerge(Kind kind, spv::Op op, SourceLocation loc,
SpirvBasicBlock *mergeLabel, SourceRange range)
: SpirvInstruction(kind, op, QualType(), loc, range),
mergeBlock(mergeLabel) {}
SpirvLoopMerge::SpirvLoopMerge(SourceLocation loc, SpirvBasicBlock *mergeBlock,
SpirvBasicBlock *contTarget,
spv::LoopControlMask mask, SourceRange range)
: SpirvMerge(IK_LoopMerge, spv::Op::OpLoopMerge, loc, mergeBlock, range),
continueTarget(contTarget), loopControlMask(mask) {}
SpirvSelectionMerge::SpirvSelectionMerge(SourceLocation loc,
SpirvBasicBlock *mergeBlock,
spv::SelectionControlMask mask,
SourceRange range)
: SpirvMerge(IK_SelectionMerge, spv::Op::OpSelectionMerge, loc, mergeBlock,
range),
selControlMask(mask) {}
SpirvTerminator::SpirvTerminator(Kind kind, spv::Op op, SourceLocation loc,
SourceRange range)
: SpirvInstruction(kind, op, QualType(), loc, range) {}
SpirvBranching::SpirvBranching(Kind kind, spv::Op op, SourceLocation loc,
SourceRange range)
: SpirvTerminator(kind, op, loc, range) {}
SpirvBranch::SpirvBranch(SourceLocation loc, SpirvBasicBlock *target,
SourceRange range)
: SpirvBranching(IK_Branch, spv::Op::OpBranch, loc, range),
targetLabel(target) {}
SpirvBranchConditional::SpirvBranchConditional(SourceLocation loc,
SpirvInstruction *cond,
SpirvBasicBlock *trueInst,
SpirvBasicBlock *falseInst)
: SpirvBranching(IK_BranchConditional, spv::Op::OpBranchConditional, loc),
condition(cond), trueLabel(trueInst), falseLabel(falseInst) {}
SpirvKill::SpirvKill(SourceLocation loc, SourceRange range)
: SpirvTerminator(IK_Kill, spv::Op::OpKill, loc, range) {}
SpirvReturn::SpirvReturn(SourceLocation loc, SpirvInstruction *retVal,
SourceRange range)
: SpirvTerminator(IK_Return,
retVal ? spv::Op::OpReturnValue : spv::Op::OpReturn, loc,
range),
returnValue(retVal) {}
SpirvSwitch::SpirvSwitch(
SourceLocation loc, SpirvInstruction *selectorInst,
SpirvBasicBlock *defaultLbl,
llvm::ArrayRef<std::pair<llvm::APInt, SpirvBasicBlock *>> &targetsVec)
: SpirvBranching(IK_Switch, spv::Op::OpSwitch, loc), selector(selectorInst),
defaultLabel(defaultLbl), targets(targetsVec.begin(), targetsVec.end()) {}
// Switch instruction methods.
SpirvBasicBlock *SpirvSwitch::getTargetLabelForLiteral(uint32_t lit) const {
  for (const auto &pair : targets)
if (pair.first == lit)
return pair.second;
return defaultLabel;
}
llvm::ArrayRef<SpirvBasicBlock *> SpirvSwitch::getTargetBranches() const {
llvm::SmallVector<SpirvBasicBlock *, 4> branches;
  for (const auto &pair : targets)
branches.push_back(pair.second);
branches.push_back(defaultLabel);
return branches;
}
SpirvUnreachable::SpirvUnreachable(SourceLocation loc)
: SpirvTerminator(IK_Unreachable, spv::Op::OpUnreachable, loc) {}
SpirvAccessChain::SpirvAccessChain(QualType resultType, SourceLocation loc,
SpirvInstruction *baseInst,
llvm::ArrayRef<SpirvInstruction *> indexVec,
SourceRange range)
: SpirvInstruction(IK_AccessChain, spv::Op::OpAccessChain, resultType, loc,
range),
base(baseInst), indices(indexVec.begin(), indexVec.end()) {
if (baseInst && baseInst->isNoninterpolated())
setNoninterpolated();
}
SpirvAtomic::SpirvAtomic(spv::Op op, QualType resultType, SourceLocation loc,
SpirvInstruction *pointerInst, spv::Scope s,
spv::MemorySemanticsMask mask,
SpirvInstruction *valueInst, SourceRange range)
: SpirvInstruction(IK_Atomic, op, resultType, loc, range),
pointer(pointerInst), scope(s), memorySemantic(mask),
memorySemanticUnequal(spv::MemorySemanticsMask::MaskNone),
value(valueInst), comparator(nullptr) {
assert(
op == spv::Op::OpAtomicLoad || op == spv::Op::OpAtomicIIncrement ||
op == spv::Op::OpAtomicIDecrement || op == spv::Op::OpAtomicFlagClear ||
op == spv::Op::OpAtomicFlagTestAndSet || op == spv::Op::OpAtomicStore ||
op == spv::Op::OpAtomicAnd || op == spv::Op::OpAtomicOr ||
op == spv::Op::OpAtomicXor || op == spv::Op::OpAtomicIAdd ||
op == spv::Op::OpAtomicISub || op == spv::Op::OpAtomicSMin ||
op == spv::Op::OpAtomicUMin || op == spv::Op::OpAtomicSMax ||
op == spv::Op::OpAtomicUMax || op == spv::Op::OpAtomicExchange);
}
SpirvAtomic::SpirvAtomic(spv::Op op, QualType resultType, SourceLocation loc,
SpirvInstruction *pointerInst, spv::Scope s,
spv::MemorySemanticsMask semanticsEqual,
spv::MemorySemanticsMask semanticsUnequal,
SpirvInstruction *valueInst,
SpirvInstruction *comparatorInst, SourceRange range)
: SpirvInstruction(IK_Atomic, op, resultType, loc, range),
pointer(pointerInst), scope(s), memorySemantic(semanticsEqual),
memorySemanticUnequal(semanticsUnequal), value(valueInst),
comparator(comparatorInst) {
assert(op == spv::Op::OpAtomicCompareExchange);
}
SpirvBarrier::SpirvBarrier(SourceLocation loc, spv::Scope memScope,
spv::MemorySemanticsMask memSemantics,
llvm::Optional<spv::Scope> execScope,
SourceRange range)
: SpirvInstruction(IK_Barrier,
execScope.hasValue() ? spv::Op::OpControlBarrier
: spv::Op::OpMemoryBarrier,
QualType(), loc, range),
memoryScope(memScope), memorySemantics(memSemantics),
executionScope(execScope) {}
SpirvBinaryOp::SpirvBinaryOp(spv::Op opcode, QualType resultType,
SourceLocation loc, SpirvInstruction *op1,
SpirvInstruction *op2, SourceRange range)
: SpirvInstruction(IK_BinaryOp, opcode, resultType, loc, range),
operand1(op1), operand2(op2) {}
SpirvBitField::SpirvBitField(Kind kind, spv::Op op, QualType resultType,
SourceLocation loc, SpirvInstruction *baseInst,
SpirvInstruction *offsetInst,
SpirvInstruction *countInst)
: SpirvInstruction(kind, op, resultType, loc), base(baseInst),
offset(offsetInst), count(countInst) {}
SpirvBitFieldExtract::SpirvBitFieldExtract(QualType resultType,
SourceLocation loc,
SpirvInstruction *baseInst,
SpirvInstruction *offsetInst,
SpirvInstruction *countInst)
: SpirvBitField(IK_BitFieldExtract,
resultType->isSignedIntegerOrEnumerationType()
? spv::Op::OpBitFieldSExtract
: spv::Op::OpBitFieldUExtract,
resultType, loc, baseInst, offsetInst, countInst) {}
SpirvBitFieldInsert::SpirvBitFieldInsert(QualType resultType,
SourceLocation loc,
SpirvInstruction *baseInst,
SpirvInstruction *insertInst,
SpirvInstruction *offsetInst,
SpirvInstruction *countInst)
: SpirvBitField(IK_BitFieldInsert, spv::Op::OpBitFieldInsert, resultType,
loc, baseInst, offsetInst, countInst),
insert(insertInst) {}
SpirvCompositeConstruct::SpirvCompositeConstruct(
QualType resultType, SourceLocation loc,
llvm::ArrayRef<SpirvInstruction *> constituentsVec, SourceRange range)
: SpirvInstruction(IK_CompositeConstruct, spv::Op::OpCompositeConstruct,
resultType, loc, range),
consituents(constituentsVec.begin(), constituentsVec.end()) {}
SpirvConstant::SpirvConstant(Kind kind, spv::Op op, const SpirvType *spvType,
bool literal)
: SpirvInstruction(kind, op, QualType(),
/*SourceLocation*/ {}),
literalConstant(literal) {
setResultType(spvType);
}
SpirvConstant::SpirvConstant(Kind kind, spv::Op op, QualType resultType,
bool literal)
: SpirvInstruction(kind, op, resultType,
/*SourceLocation*/ {}),
literalConstant(literal) {}
bool SpirvConstant::operator==(const SpirvConstant &that) const {
if (auto *booleanInst = dyn_cast<SpirvConstantBoolean>(this)) {
auto *thatBooleanInst = dyn_cast<SpirvConstantBoolean>(&that);
if (thatBooleanInst == nullptr)
return false;
return *booleanInst == *thatBooleanInst;
} else if (auto *integerInst = dyn_cast<SpirvConstantInteger>(this)) {
auto *thatIntegerInst = dyn_cast<SpirvConstantInteger>(&that);
if (thatIntegerInst == nullptr)
return false;
return *integerInst == *thatIntegerInst;
} else if (auto *floatInst = dyn_cast<SpirvConstantFloat>(this)) {
auto *thatFloatInst = dyn_cast<SpirvConstantFloat>(&that);
if (thatFloatInst == nullptr)
return false;
return *floatInst == *thatFloatInst;
} else if (auto *compositeInst = dyn_cast<SpirvConstantComposite>(this)) {
auto *thatCompositeInst = dyn_cast<SpirvConstantComposite>(&that);
if (thatCompositeInst == nullptr)
return false;
return *compositeInst == *thatCompositeInst;
} else if (auto *nullInst = dyn_cast<SpirvConstantNull>(this)) {
auto *thatNullInst = dyn_cast<SpirvConstantNull>(&that);
if (thatNullInst == nullptr)
return false;
return *nullInst == *thatNullInst;
  } else if (auto *undefInst = dyn_cast<SpirvUndef>(this)) {
    auto *thatUndefInst = dyn_cast<SpirvUndef>(&that);
    if (thatUndefInst == nullptr)
      return false;
    return *undefInst == *thatUndefInst;
}
assert(false && "operator== undefined for SpirvConstant subclass");
return false;
}
bool SpirvConstant::isSpecConstant() const {
return opcode == spv::Op::OpSpecConstant ||
opcode == spv::Op::OpSpecConstantTrue ||
opcode == spv::Op::OpSpecConstantFalse ||
opcode == spv::Op::OpSpecConstantComposite;
}
SpirvConstantBoolean::SpirvConstantBoolean(QualType type, bool val,
bool isSpecConst)
: SpirvConstant(IK_ConstantBoolean,
val ? (isSpecConst ? spv::Op::OpSpecConstantTrue
: spv::Op::OpConstantTrue)
: (isSpecConst ? spv::Op::OpSpecConstantFalse
: spv::Op::OpConstantFalse),
type),
value(val) {}
bool SpirvConstantBoolean::operator==(const SpirvConstantBoolean &that) const {
return resultType == that.resultType && astResultType == that.astResultType &&
value == that.value && opcode == that.opcode;
}
SpirvConstantInteger::SpirvConstantInteger(QualType type, llvm::APInt val,
bool isSpecConst)
: SpirvConstant(IK_ConstantInteger,
isSpecConst ? spv::Op::OpSpecConstant : spv::Op::OpConstant,
type),
value(val) {
assert(type->isIntegralOrEnumerationType());
}
bool SpirvConstantInteger::operator==(const SpirvConstantInteger &that) const {
return resultType == that.resultType && astResultType == that.astResultType &&
value == that.value && opcode == that.opcode;
}
SpirvConstantFloat::SpirvConstantFloat(QualType type, llvm::APFloat val,
bool isSpecConst)
: SpirvConstant(IK_ConstantFloat,
isSpecConst ? spv::Op::OpSpecConstant : spv::Op::OpConstant,
type),
value(val) {
assert(type->isFloatingType());
}
bool SpirvConstantFloat::operator==(const SpirvConstantFloat &that) const {
return resultType == that.resultType && astResultType == that.astResultType &&
value.bitwiseIsEqual(that.value) && opcode == that.opcode;
}
SpirvConstantComposite::SpirvConstantComposite(
QualType type, llvm::ArrayRef<SpirvConstant *> constituentsVec,
bool isSpecConst)
: SpirvConstant(IK_ConstantComposite,
isSpecConst ? spv::Op::OpSpecConstantComposite
: spv::Op::OpConstantComposite,
type),
constituents(constituentsVec.begin(), constituentsVec.end()) {}
SpirvConstantNull::SpirvConstantNull(QualType type)
: SpirvConstant(IK_ConstantNull, spv::Op::OpConstantNull, type) {}
bool SpirvConstantNull::operator==(const SpirvConstantNull &that) const {
return opcode == that.opcode && resultType == that.resultType &&
astResultType == that.astResultType;
}
SpirvUndef::SpirvUndef(QualType type)
: SpirvInstruction(IK_Undef, spv::Op::OpUndef, type,
/*SourceLocation*/ {}) {}
bool SpirvUndef::operator==(const SpirvUndef &that) const {
return opcode == that.opcode && resultType == that.resultType &&
astResultType == that.astResultType;
}
SpirvCompositeExtract::SpirvCompositeExtract(QualType resultType,
SourceLocation loc,
SpirvInstruction *compositeInst,
llvm::ArrayRef<uint32_t> indexVec,
SourceRange range)
: SpirvInstruction(IK_CompositeExtract, spv::Op::OpCompositeExtract,
resultType, loc, range),
composite(compositeInst), indices(indexVec.begin(), indexVec.end()) {
if (compositeInst && compositeInst->isNoninterpolated())
setNoninterpolated();
}
SpirvCompositeInsert::SpirvCompositeInsert(QualType resultType,
SourceLocation loc,
SpirvInstruction *compositeInst,
SpirvInstruction *objectInst,
llvm::ArrayRef<uint32_t> indexVec,
SourceRange range)
: SpirvInstruction(IK_CompositeInsert, spv::Op::OpCompositeInsert,
resultType, loc, range),
composite(compositeInst), object(objectInst),
indices(indexVec.begin(), indexVec.end()) {}
SpirvEmitVertex::SpirvEmitVertex(SourceLocation loc, SourceRange range)
: SpirvInstruction(IK_EmitVertex, spv::Op::OpEmitVertex, QualType(), loc,
range) {}
SpirvEndPrimitive::SpirvEndPrimitive(SourceLocation loc, SourceRange range)
: SpirvInstruction(IK_EndPrimitive, spv::Op::OpEndPrimitive, QualType(),
loc, range) {}
SpirvExtInst::SpirvExtInst(QualType resultType, SourceLocation loc,
SpirvExtInstImport *set, uint32_t inst,
llvm::ArrayRef<SpirvInstruction *> operandsVec,
SourceRange range)
: SpirvInstruction(IK_ExtInst, spv::Op::OpExtInst, resultType, loc, range),
instructionSet(set), instruction(inst),
operands(operandsVec.begin(), operandsVec.end()) {}
SpirvFunctionCall::SpirvFunctionCall(QualType resultType, SourceLocation loc,
SpirvFunction *fn,
llvm::ArrayRef<SpirvInstruction *> argsVec,
SourceRange range)
: SpirvInstruction(IK_FunctionCall, spv::Op::OpFunctionCall, resultType,
loc, range),
function(fn), args(argsVec.begin(), argsVec.end()) {}
SpirvGroupNonUniformOp::SpirvGroupNonUniformOp(
spv::Op op, QualType resultType, spv::Scope scope,
llvm::ArrayRef<SpirvInstruction *> operandsVec, SourceLocation loc,
llvm::Optional<spv::GroupOperation> group)
: SpirvInstruction(IK_GroupNonUniformOp, op, resultType, loc),
execScope(scope), operands(operandsVec.begin(), operandsVec.end()),
groupOp(group) {
switch (op) {
// Group non-uniform nullary operations.
case spv::Op::OpGroupNonUniformElect:
assert(operandsVec.size() == 0);
break;
// Group non-uniform unary operations.
case spv::Op::OpGroupNonUniformAll:
case spv::Op::OpGroupNonUniformAny:
case spv::Op::OpGroupNonUniformAllEqual:
case spv::Op::OpGroupNonUniformBroadcastFirst:
case spv::Op::OpGroupNonUniformBallot:
case spv::Op::OpGroupNonUniformInverseBallot:
case spv::Op::OpGroupNonUniformBallotBitCount:
case spv::Op::OpGroupNonUniformBallotFindLSB:
case spv::Op::OpGroupNonUniformBallotFindMSB:
case spv::Op::OpGroupNonUniformSMin:
case spv::Op::OpGroupNonUniformUMin:
case spv::Op::OpGroupNonUniformFMin:
case spv::Op::OpGroupNonUniformSMax:
case spv::Op::OpGroupNonUniformUMax:
case spv::Op::OpGroupNonUniformFMax:
case spv::Op::OpGroupNonUniformLogicalAnd:
case spv::Op::OpGroupNonUniformLogicalOr:
case spv::Op::OpGroupNonUniformLogicalXor:
assert(operandsVec.size() == 1);
break;
// Group non-uniform binary operations.
case spv::Op::OpGroupNonUniformBroadcast:
case spv::Op::OpGroupNonUniformBallotBitExtract:
case spv::Op::OpGroupNonUniformShuffle:
case spv::Op::OpGroupNonUniformShuffleXor:
case spv::Op::OpGroupNonUniformShuffleUp:
case spv::Op::OpGroupNonUniformShuffleDown:
case spv::Op::OpGroupNonUniformQuadBroadcast:
case spv::Op::OpGroupNonUniformQuadSwap:
assert(operandsVec.size() == 2);
break;
// Group non-uniform operations with a required and optional operand.
case spv::Op::OpGroupNonUniformIAdd:
case spv::Op::OpGroupNonUniformFAdd:
case spv::Op::OpGroupNonUniformIMul:
case spv::Op::OpGroupNonUniformFMul:
case spv::Op::OpGroupNonUniformBitwiseAnd:
case spv::Op::OpGroupNonUniformBitwiseOr:
case spv::Op::OpGroupNonUniformBitwiseXor:
assert(operandsVec.size() >= 1 && operandsVec.size() <= 2);
break;
// Unexpected opcode.
default:
assert(false && "Unexpected Group non-uniform opcode");
break;
}
}
SpirvImageOp::SpirvImageOp(
spv::Op op, QualType resultType, SourceLocation loc,
SpirvInstruction *imageInst, SpirvInstruction *coordinateInst,
spv::ImageOperandsMask mask, SpirvInstruction *drefInst,
SpirvInstruction *biasInst, SpirvInstruction *lodInst,
SpirvInstruction *gradDxInst, SpirvInstruction *gradDyInst,
SpirvInstruction *constOffsetInst, SpirvInstruction *offsetInst,
SpirvInstruction *constOffsetsInst, SpirvInstruction *sampleInst,
SpirvInstruction *minLodInst, SpirvInstruction *componentInst,
SpirvInstruction *texelToWriteInst, SourceRange range)
: SpirvInstruction(IK_ImageOp, op, resultType, loc, range),
image(imageInst), coordinate(coordinateInst), dref(drefInst),
bias(biasInst), lod(lodInst), gradDx(gradDxInst), gradDy(gradDyInst),
constOffset(constOffsetInst), offset(offsetInst),
constOffsets(constOffsetsInst), sample(sampleInst), minLod(minLodInst),
component(componentInst), texelToWrite(texelToWriteInst),
operandsMask(mask) {
assert(op == spv::Op::OpImageSampleImplicitLod ||
op == spv::Op::OpImageSampleExplicitLod ||
op == spv::Op::OpImageSampleDrefImplicitLod ||
op == spv::Op::OpImageSampleDrefExplicitLod ||
op == spv::Op::OpImageSparseSampleImplicitLod ||
op == spv::Op::OpImageSparseSampleExplicitLod ||
op == spv::Op::OpImageSparseSampleDrefImplicitLod ||
op == spv::Op::OpImageSparseSampleDrefExplicitLod ||
op == spv::Op::OpImageFetch || op == spv::Op::OpImageSparseFetch ||
op == spv::Op::OpImageGather || op == spv::Op::OpImageSparseGather ||
op == spv::Op::OpImageDrefGather ||
op == spv::Op::OpImageSparseDrefGather || op == spv::Op::OpImageRead ||
op == spv::Op::OpImageSparseRead || op == spv::Op::OpImageWrite);
if (op == spv::Op::OpImageSampleExplicitLod ||
op == spv::Op::OpImageSampleDrefExplicitLod ||
op == spv::Op::OpImageSparseSampleExplicitLod ||
op == spv::Op::OpImageSparseSampleDrefExplicitLod) {
assert(lod || (gradDx && gradDy));
}
if (op == spv::Op::OpImageSampleDrefImplicitLod ||
op == spv::Op::OpImageSampleDrefExplicitLod ||
op == spv::Op::OpImageSparseSampleDrefImplicitLod ||
op == spv::Op::OpImageSparseSampleDrefExplicitLod ||
op == spv::Op::OpImageDrefGather ||
op == spv::Op::OpImageSparseDrefGather) {
assert(dref);
}
if (op == spv::Op::OpImageWrite) {
assert(texelToWrite);
}
if (op == spv::Op::OpImageGather || op == spv::Op::OpImageSparseGather) {
assert(component);
}
}
bool SpirvImageOp::isSparse() const {
return opcode == spv::Op::OpImageSparseSampleImplicitLod ||
opcode == spv::Op::OpImageSparseSampleExplicitLod ||
opcode == spv::Op::OpImageSparseSampleDrefImplicitLod ||
opcode == spv::Op::OpImageSparseSampleDrefExplicitLod ||
opcode == spv::Op::OpImageSparseFetch ||
opcode == spv::Op::OpImageSparseGather ||
opcode == spv::Op::OpImageSparseDrefGather ||
opcode == spv::Op::OpImageSparseRead;
}
SpirvImageQuery::SpirvImageQuery(spv::Op op, QualType resultType,
SourceLocation loc, SpirvInstruction *img,
SpirvInstruction *lodInst,
SpirvInstruction *coordInst, SourceRange range)
: SpirvInstruction(IK_ImageQuery, op, resultType, loc, range), image(img),
lod(lodInst), coordinate(coordInst) {
assert(op == spv::Op::OpImageQueryFormat ||
op == spv::Op::OpImageQueryOrder || op == spv::Op::OpImageQuerySize ||
op == spv::Op::OpImageQueryLevels ||
op == spv::Op::OpImageQuerySamples || op == spv::Op::OpImageQueryLod ||
op == spv::Op::OpImageQuerySizeLod);
if (lodInst)
assert(op == spv::Op::OpImageQuerySizeLod);
if (coordInst)
assert(op == spv::Op::OpImageQueryLod);
}
SpirvImageSparseTexelsResident::SpirvImageSparseTexelsResident(
QualType resultType, SourceLocation loc, SpirvInstruction *resCode,
SourceRange range)
: SpirvInstruction(IK_ImageSparseTexelsResident,
spv::Op::OpImageSparseTexelsResident, resultType, loc,
range),
residentCode(resCode) {}
SpirvImageTexelPointer::SpirvImageTexelPointer(QualType resultType,
SourceLocation loc,
SpirvInstruction *imageInst,
SpirvInstruction *coordinateInst,
SpirvInstruction *sampleInst)
: SpirvInstruction(IK_ImageTexelPointer, spv::Op::OpImageTexelPointer,
resultType, loc),
image(imageInst), coordinate(coordinateInst), sample(sampleInst) {}
SpirvLoad::SpirvLoad(QualType resultType, SourceLocation loc,
SpirvInstruction *pointerInst, SourceRange range,
llvm::Optional<spv::MemoryAccessMask> mask)
: SpirvInstruction(IK_Load, spv::Op::OpLoad, resultType, loc, range),
pointer(pointerInst), memoryAccess(mask) {}
void SpirvLoad::setAlignment(uint32_t alignment) {
assert(alignment != 0);
assert(llvm::isPowerOf2_32(alignment));
if (!memoryAccess.hasValue()) {
memoryAccess = spv::MemoryAccessMask::Aligned;
} else {
memoryAccess.getValue() =
memoryAccess.getValue() | spv::MemoryAccessMask::Aligned;
}
memoryAlignment = alignment;
}
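// Usage sketch (hypothetical call site, not from this file): marking a load
// as 16-byte aligned ORs the Aligned bit into the memory-access mask and
// records the literal that will follow it in the encoded instruction:
//   load->setAlignment(16); // memoryAccess |= Aligned, memoryAlignment = 16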
SpirvCopyObject::SpirvCopyObject(QualType resultType, SourceLocation loc,
SpirvInstruction *pointerInst)
: SpirvInstruction(IK_CopyObject, spv::Op::OpCopyObject, resultType, loc),
pointer(pointerInst) {}
SpirvSampledImage::SpirvSampledImage(QualType resultType, SourceLocation loc,
SpirvInstruction *imageInst,
SpirvInstruction *samplerInst,
SourceRange range)
: SpirvInstruction(IK_SampledImage, spv::Op::OpSampledImage, resultType,
loc, range),
image(imageInst), sampler(samplerInst) {}
SpirvSelect::SpirvSelect(QualType resultType, SourceLocation loc,
SpirvInstruction *cond, SpirvInstruction *trueInst,
SpirvInstruction *falseInst, SourceRange range)
: SpirvInstruction(IK_Select, spv::Op::OpSelect, resultType, loc, range),
condition(cond), trueObject(trueInst), falseObject(falseInst) {}
SpirvSpecConstantBinaryOp::SpirvSpecConstantBinaryOp(spv::Op specConstantOp,
QualType resultType,
SourceLocation loc,
SpirvInstruction *op1,
SpirvInstruction *op2)
: SpirvInstruction(IK_SpecConstantBinaryOp, spv::Op::OpSpecConstantOp,
resultType, loc),
specOp(specConstantOp), operand1(op1), operand2(op2) {}
SpirvSpecConstantUnaryOp::SpirvSpecConstantUnaryOp(spv::Op specConstantOp,
QualType resultType,
SourceLocation loc,
SpirvInstruction *op)
: SpirvInstruction(IK_SpecConstantUnaryOp, spv::Op::OpSpecConstantOp,
resultType, loc),
specOp(specConstantOp), operand(op) {}
SpirvStore::SpirvStore(SourceLocation loc, SpirvInstruction *pointerInst,
SpirvInstruction *objectInst,
llvm::Optional<spv::MemoryAccessMask> mask,
SourceRange range)
: SpirvInstruction(IK_Store, spv::Op::OpStore, QualType(), loc, range),
pointer(pointerInst), object(objectInst), memoryAccess(mask) {}
void SpirvStore::setAlignment(uint32_t alignment) {
assert(alignment != 0);
assert(llvm::isPowerOf2_32(alignment));
if (!memoryAccess.hasValue()) {
memoryAccess = spv::MemoryAccessMask::Aligned;
} else {
memoryAccess.getValue() =
memoryAccess.getValue() | spv::MemoryAccessMask::Aligned;
}
memoryAlignment = alignment;
}
SpirvNullaryOp::SpirvNullaryOp(spv::Op opcode, SourceLocation loc,
SourceRange range)
: SpirvInstruction(IK_NullaryOp, opcode, QualType(), loc, range) {}
SpirvUnaryOp::SpirvUnaryOp(spv::Op opcode, QualType resultType,
SourceLocation loc, SpirvInstruction *op,
SourceRange range)
: SpirvInstruction(IK_UnaryOp, opcode, resultType, loc, range),
operand(op) {}
SpirvUnaryOp::SpirvUnaryOp(spv::Op opcode, const SpirvType *resultType,
SourceLocation loc, SpirvInstruction *op)
: SpirvInstruction(IK_UnaryOp, opcode, QualType(), loc), operand(op) {
setResultType(resultType);
}
bool SpirvUnaryOp::isConversionOp() const {
return opcode == spv::Op::OpConvertFToU || opcode == spv::Op::OpConvertFToS ||
opcode == spv::Op::OpConvertSToF || opcode == spv::Op::OpConvertUToF ||
opcode == spv::Op::OpUConvert || opcode == spv::Op::OpSConvert ||
opcode == spv::Op::OpFConvert || opcode == spv::Op::OpQuantizeToF16 ||
opcode == spv::Op::OpBitcast;
}
SpirvVectorShuffle::SpirvVectorShuffle(QualType resultType, SourceLocation loc,
SpirvInstruction *vec1Inst,
SpirvInstruction *vec2Inst,
llvm::ArrayRef<uint32_t> componentsVec,
SourceRange range)
: SpirvInstruction(IK_VectorShuffle, spv::Op::OpVectorShuffle, resultType,
loc, range),
vec1(vec1Inst), vec2(vec2Inst),
components(componentsVec.begin(), componentsVec.end()) {}
SpirvArrayLength::SpirvArrayLength(QualType resultType, SourceLocation loc,
SpirvInstruction *structure_,
uint32_t memberLiteral, SourceRange range)
: SpirvInstruction(IK_ArrayLength, spv::Op::OpArrayLength, resultType, loc,
range),
structure(structure_), arrayMember(memberLiteral) {}
SpirvRayTracingOpNV::SpirvRayTracingOpNV(
QualType resultType, spv::Op opcode,
llvm::ArrayRef<SpirvInstruction *> vecOperands, SourceLocation loc)
: SpirvInstruction(IK_RayTracingOpNV, opcode, resultType, loc),
operands(vecOperands.begin(), vecOperands.end()) {}
SpirvDemoteToHelperInvocation::SpirvDemoteToHelperInvocation(SourceLocation loc)
: SpirvInstruction(IK_DemoteToHelperInvocation,
spv::Op::OpDemoteToHelperInvocation, /*QualType*/ {},
loc) {}
SpirvIsHelperInvocationEXT::SpirvIsHelperInvocationEXT(QualType resultType,
SourceLocation loc)
: SpirvInstruction(IK_IsHelperInvocationEXT,
spv::Op::OpIsHelperInvocationEXT, resultType, loc) {}
// Note: we are using a null result type in the constructor. All debug
// instructions should later get OpTypeVoid as their result type.
SpirvDebugInstruction::SpirvDebugInstruction(Kind kind, uint32_t opcode)
: SpirvInstruction(kind, spv::Op::OpExtInst,
/*result type */ {},
/*SourceLocation*/ {}),
debugOpcode(opcode), debugSpirvType(nullptr), debugType(nullptr),
instructionSet(nullptr) {}
SpirvDebugInfoNone::SpirvDebugInfoNone()
: SpirvDebugInstruction(IK_DebugInfoNone, /*opcode*/ 0u) {}
SpirvDebugSource::SpirvDebugSource(llvm::StringRef f, llvm::StringRef t)
: SpirvDebugInstruction(IK_DebugSource, /*opcode*/ 35u), file(f), text(t) {}
SpirvDebugCompilationUnit::SpirvDebugCompilationUnit(uint32_t spvVer,
uint32_t dwarfVer,
SpirvDebugSource *src)
: SpirvDebugInstruction(IK_DebugCompilationUnit, /*opcode*/ 1u),
spirvVersion(spvVer), dwarfVersion(dwarfVer), source(src),
lang(spv::SourceLanguage::HLSL) {}
SpirvDebugFunction::SpirvDebugFunction(
llvm::StringRef name, SpirvDebugSource *src, uint32_t fline, uint32_t fcol,
SpirvDebugInstruction *parent, llvm::StringRef linkName, uint32_t flags_,
uint32_t bodyLine, SpirvFunction *func)
: SpirvDebugInstruction(IK_DebugFunction, /*opcode*/ 20u), source(src),
fnLine(fline), fnColumn(fcol), parentScope(parent), linkageName(linkName),
flags(flags_), scopeLine(bodyLine), fn(func), debugNone(nullptr),
fnType(nullptr) {
debugName = name;
}
SpirvDebugFunctionDeclaration::SpirvDebugFunctionDeclaration(
llvm::StringRef name, SpirvDebugSource *src, uint32_t fline, uint32_t fcol,
SpirvDebugInstruction *parent, llvm::StringRef linkName, uint32_t flags_)
: SpirvDebugInstruction(IK_DebugFunctionDecl, /*opcode*/ 19u), source(src),
fnLine(fline), fnColumn(fcol), parentScope(parent), linkageName(linkName),
flags(flags_) {
debugName = name;
}
SpirvDebugFunctionDefinition::SpirvDebugFunctionDefinition(
SpirvDebugFunction *function_, SpirvFunction *fn_)
: SpirvDebugInstruction(IK_DebugFunctionDef, /*opcode*/ 101u),
function(function_), fn(fn_) {}
SpirvDebugEntryPoint::SpirvDebugEntryPoint(SpirvDebugFunction *ep_,
SpirvDebugCompilationUnit *cu_,
llvm::StringRef signature_,
llvm::StringRef args_)
: SpirvDebugInstruction(IK_DebugEntryPoint, /*opcode*/ 107u), ep(ep_),
cu(cu_), signature(signature_), args(args_) {}
SpirvDebugLocalVariable::SpirvDebugLocalVariable(
QualType debugQualType_, llvm::StringRef varName, SpirvDebugSource *src,
uint32_t lineNumber, uint32_t colNumber, SpirvDebugInstruction *parent,
uint32_t flags_, llvm::Optional<uint32_t> argNumber_)
: SpirvDebugInstruction(IK_DebugLocalVariable, /*opcode*/ 26u), source(src),
line(lineNumber), column(colNumber), parentScope(parent), flags(flags_),
argNumber(argNumber_) {
debugName = varName;
setDebugQualType(debugQualType_);
}
SpirvDebugGlobalVariable::SpirvDebugGlobalVariable(
QualType debugQualType, llvm::StringRef varName, SpirvDebugSource *src,
uint32_t line_, uint32_t column_, SpirvDebugInstruction *parent,
llvm::StringRef linkageName_, SpirvVariable *var_, uint32_t flags_,
llvm::Optional<SpirvInstruction *> staticMemberDebugDecl_)
: SpirvDebugInstruction(IK_DebugGlobalVariable, /*opcode*/ 18u),
source(src), line(line_), column(column_), parentScope(parent),
linkageName(linkageName_), var(var_), flags(flags_),
staticMemberDebugDecl(staticMemberDebugDecl_) {
debugName = varName;
setDebugQualType(debugQualType);
setDebugType(nullptr);
}
SpirvDebugOperation::SpirvDebugOperation(uint32_t operationOpCode_,
llvm::ArrayRef<int32_t> operands_)
: SpirvDebugInstruction(IK_DebugOperation, /*opcode*/ 30u),
operationOpcode(operationOpCode_),
operands(operands_.begin(), operands_.end()) {}
SpirvDebugExpression::SpirvDebugExpression(
llvm::ArrayRef<SpirvDebugOperation *> operations_)
: SpirvDebugInstruction(IK_DebugExpression, /*opcode*/ 31u),
operations(operations_.begin(), operations_.end()) {}
SpirvDebugDeclare::SpirvDebugDeclare(SpirvDebugLocalVariable *debugVar_,
SpirvInstruction *var_,
SpirvDebugExpression *expr,
SourceLocation loc, SourceRange range)
: SpirvDebugInstruction(IK_DebugDeclare, /*opcode*/ 28u),
debugVar(debugVar_), var(var_), expression(expr) {
srcLoc = loc;
srcRange = range;
}
SpirvDebugLexicalBlock::SpirvDebugLexicalBlock(SpirvDebugSource *source_,
uint32_t line_, uint32_t column_,
SpirvDebugInstruction *parent_)
: SpirvDebugInstruction(IK_DebugLexicalBlock, /*opcode*/ 21u),
source(source_), line(line_), column(column_), parent(parent_) {}
SpirvDebugScope::SpirvDebugScope(SpirvDebugInstruction *scope_)
: SpirvDebugInstruction(IK_DebugScope, /*opcode*/ 23u), scope(scope_) {}
SpirvDebugTypeBasic::SpirvDebugTypeBasic(llvm::StringRef name,
SpirvConstant *size_,
uint32_t encoding_)
: SpirvDebugType(IK_DebugTypeBasic, /*opcode*/ 2u), size(size_),
encoding(encoding_) {
debugName = name;
}
uint32_t SpirvDebugTypeBasic::getSizeInBits() const {
auto *size_ = dyn_cast<SpirvConstantInteger>(size);
  assert(size_ && "Size of DebugTypeBasic must be an integer constant.");
return size_->getValue().getLimitedValue();
}
SpirvDebugTypeArray::SpirvDebugTypeArray(SpirvDebugType *elemType,
llvm::ArrayRef<uint32_t> elemCount)
: SpirvDebugType(IK_DebugTypeArray, /*opcode*/ 5u), elementType(elemType),
elementCount(elemCount.begin(), elemCount.end()) {}
SpirvDebugTypeVector::SpirvDebugTypeVector(SpirvDebugType *elemType,
uint32_t elemCount)
: SpirvDebugType(IK_DebugTypeVector, /*opcode*/ 6u), elementType(elemType),
elementCount(elemCount) {}
SpirvDebugTypeMatrix::SpirvDebugTypeMatrix(SpirvDebugTypeVector *vectorType,
uint32_t vectorCount)
: SpirvDebugType(IK_DebugTypeMatrix, /*opcode*/ 108u),
vectorType(vectorType), vectorCount(vectorCount) {}
SpirvDebugTypeFunction::SpirvDebugTypeFunction(
uint32_t flags, SpirvDebugType *ret,
llvm::ArrayRef<SpirvDebugType *> params)
: SpirvDebugType(IK_DebugTypeFunction, /*opcode*/ 8u), debugFlags(flags),
returnType(ret), paramTypes(params.begin(), params.end()) {}
SpirvDebugTypeMember::SpirvDebugTypeMember(
llvm::StringRef name, SpirvDebugType *type, SpirvDebugSource *source_,
uint32_t line_, uint32_t column_, SpirvDebugInstruction *parent_,
uint32_t flags_, uint32_t offsetInBits_, uint32_t sizeInBits_,
const APValue *value_)
: SpirvDebugType(IK_DebugTypeMember, /*opcode*/ 11u), source(source_),
line(line_), column(column_), parent(parent_),
offsetInBits(offsetInBits_), sizeInBits(sizeInBits_), debugFlags(flags_),
value(value_) {
debugName = name;
setDebugType(type);
}
SpirvDebugTypeComposite::SpirvDebugTypeComposite(
llvm::StringRef name, SpirvDebugSource *source_, uint32_t line_,
uint32_t column_, SpirvDebugInstruction *parent_,
llvm::StringRef linkageName_, uint32_t flags_, uint32_t tag_)
: SpirvDebugType(IK_DebugTypeComposite, /*opcode*/ 10u), source(source_),
line(line_), column(column_), parent(parent_), linkageName(linkageName_),
debugFlags(flags_), tag(tag_), debugNone(nullptr) {
debugName = name;
}
SpirvDebugTypeTemplate::SpirvDebugTypeTemplate(
SpirvDebugInstruction *target_,
const llvm::SmallVector<SpirvDebugTypeTemplateParameter *, 2> ¶ms_)
: SpirvDebugType(IK_DebugTypeTemplate, /*opcode*/ 14u), target(target_),
params(params_) {}
SpirvDebugTypeTemplateParameter::SpirvDebugTypeTemplateParameter(
llvm::StringRef name, SpirvDebugType *type, SpirvInstruction *value_,
SpirvDebugSource *source_, uint32_t line_, uint32_t column_)
: SpirvDebugType(IK_DebugTypeTemplateParameter, /*opcode*/ 15u),
actualType(type), value(value_), source(source_), line(line_),
column(column_) {
debugName = name;
}
SpirvRayQueryOpKHR::SpirvRayQueryOpKHR(
QualType resultType, spv::Op opcode,
llvm::ArrayRef<SpirvInstruction *> vecOperands, bool flags,
SourceLocation loc, SourceRange range)
: SpirvInstruction(IK_RayQueryOpKHR, opcode, resultType, loc, range),
operands(vecOperands.begin(), vecOperands.end()), cullFlags(flags) {}
SpirvReadClock::SpirvReadClock(QualType resultType, SpirvInstruction *s,
SourceLocation loc)
: SpirvInstruction(IK_ReadClock, spv::Op::OpReadClockKHR, resultType, loc),
scope(s) {}
SpirvRayTracingTerminateOpKHR::SpirvRayTracingTerminateOpKHR(spv::Op opcode,
SourceLocation loc)
: SpirvTerminator(IK_RayTracingTerminate, opcode, loc) {
assert(opcode == spv::Op::OpTerminateRayKHR ||
opcode == spv::Op::OpIgnoreIntersectionKHR);
}
SpirvIntrinsicInstruction::SpirvIntrinsicInstruction(
QualType resultType, uint32_t opcode,
llvm::ArrayRef<SpirvInstruction *> vecOperands,
llvm::ArrayRef<llvm::StringRef> exts, SpirvExtInstImport *set,
llvm::ArrayRef<uint32_t> capts, SourceLocation loc)
: SpirvInstruction(IK_SpirvIntrinsicInstruction,
set != nullptr ? spv::Op::OpExtInst
: static_cast<spv::Op>(opcode),
resultType, loc),
instruction(opcode), operands(vecOperands.begin(), vecOperands.end()),
capabilities(capts.begin(), capts.end()),
extensions(exts.begin(), exts.end()), instructionSet(set) {}
SpirvEmitMeshTasksEXT::SpirvEmitMeshTasksEXT(
SpirvInstruction *xDim, SpirvInstruction *yDim, SpirvInstruction *zDim,
SpirvInstruction *payload, SourceLocation loc, SourceRange range)
: SpirvInstruction(IK_EmitMeshTasksEXT, spv::Op::OpEmitMeshTasksEXT,
QualType(), loc, range),
xDim(xDim), yDim(yDim), zDim(zDim), payload(payload) {}
SpirvSetMeshOutputsEXT::SpirvSetMeshOutputsEXT(SpirvInstruction *vertCount,
SpirvInstruction *primCount,
SourceLocation loc,
SourceRange range)
: SpirvInstruction(IK_SetMeshOutputsEXT, spv::Op::OpSetMeshOutputsEXT,
QualType(), loc, range),
vertCount(vertCount), primCount(primCount) {}
} // namespace spirv
} // namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/StageVar.h | //===--- StageVar.h - Classes for stage variable information --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_SPIRV_STAGEVAR_H
#define LLVM_CLANG_LIB_SPIRV_STAGEVAR_H
#include "dxc/DXIL/DxilSemantic.h"
#include "dxc/DXIL/DxilSigPoint.h"
#include "clang/AST/Attr.h"
#include "clang/SPIRV/SpirvInstruction.h"
#include <string>
namespace clang {
namespace spirv {
/// A struct containing information about a particular HLSL semantic.
struct SemanticInfo {
llvm::StringRef str; ///< The original semantic string
const hlsl::Semantic *semantic; ///< The unique semantic object
llvm::StringRef name; ///< The semantic string without index
uint32_t index; ///< The semantic index
SourceLocation loc; ///< Source code location
bool isValid() const { return semantic != nullptr; }
inline hlsl::Semantic::Kind getKind() const;
/// \brief Returns true if this semantic is an SV_Target.
inline bool isTarget() const;
};
// A struct containing information about location and component decorations.
struct LocationAndComponent {
uint32_t location;
uint32_t component;
bool componentAlignment;
};
/// \brief The class containing HLSL and SPIR-V information about a Vulkan stage
/// (builtin/input/output) variable.
class StageVar {
public:
inline StageVar(const hlsl::SigPoint *sig, SemanticInfo semaInfo,
const VKBuiltInAttr *builtin, QualType astType,
LocationAndComponent locAndComponentCount)
: sigPoint(sig), semanticInfo(std::move(semaInfo)), builtinAttr(builtin),
type(astType), value(nullptr), isBuiltin(false),
storageClass(spv::StorageClass::Max), location(nullptr),
locationAndComponentCount(locAndComponentCount), entryPoint(nullptr),
locOrBuiltinDecorateAttr(false) {
isBuiltin = builtinAttr != nullptr;
}
const hlsl::SigPoint *getSigPoint() const { return sigPoint; }
const SemanticInfo &getSemanticInfo() const { return semanticInfo; }
std::string getSemanticStr() const;
QualType getAstType() const { return type; }
SpirvVariable *getSpirvInstr() const { return value; }
void setSpirvInstr(SpirvVariable *spvInstr) { value = spvInstr; }
const VKBuiltInAttr *getBuiltInAttr() const { return builtinAttr; }
bool isSpirvBuitin() const { return isBuiltin; }
void setIsSpirvBuiltin() { isBuiltin = true; }
spv::StorageClass getStorageClass() const { return storageClass; }
void setStorageClass(spv::StorageClass sc) { storageClass = sc; }
const VKLocationAttr *getLocationAttr() const { return location; }
void setLocationAttr(const VKLocationAttr *loc) { location = loc; }
const VKIndexAttr *getIndexAttr() const { return indexAttr; }
void setIndexAttr(const VKIndexAttr *idx) { indexAttr = idx; }
uint32_t getLocationCount() const {
return locationAndComponentCount.location;
}
LocationAndComponent getLocationAndComponentCount() const {
return locationAndComponentCount;
}
SpirvFunction *getEntryPoint() const { return entryPoint; }
void setEntryPoint(SpirvFunction *entry) { entryPoint = entry; }
bool hasLocOrBuiltinDecorateAttr() const { return locOrBuiltinDecorateAttr; }
void setIsLocOrBuiltinDecorateAttr() { locOrBuiltinDecorateAttr = true; }
private:
/// HLSL SigPoint. It uniquely identifies each set of parameters that may be
/// input or output for each entry point.
const hlsl::SigPoint *sigPoint;
/// Information about HLSL semantic string.
SemanticInfo semanticInfo;
/// SPIR-V BuiltIn attribute.
const VKBuiltInAttr *builtinAttr;
/// The AST QualType.
QualType type;
/// SPIR-V instruction.
SpirvVariable *value;
/// Indicates whether this stage variable should be a SPIR-V builtin.
bool isBuiltin;
/// SPIR-V storage class this stage variable belongs to.
spv::StorageClass storageClass;
/// Location assignment if input/output variable.
const VKLocationAttr *location;
/// Index assignment if PS output variable
const VKIndexAttr *indexAttr;
/// How many locations and components this stage variable takes.
LocationAndComponent locationAndComponentCount;
/// Entry point for this stage variable. If this stage variable is not
/// specific for an entry point e.g., built-in, it must be nullptr.
SpirvFunction *entryPoint;
bool locOrBuiltinDecorateAttr;
};
/// \brief The struct containing information of stage variable's location and
/// index. This information will be used to check the duplication of stage
/// variable's location and index.
struct StageVariableLocationInfo {
SpirvFunction *entryPoint;
spv::StorageClass sc;
uint32_t location;
uint32_t index;
static inline StageVariableLocationInfo getEmptyKey() {
return {nullptr, spv::StorageClass::Max, 0, 0};
}
static inline StageVariableLocationInfo getTombstoneKey() {
return {nullptr, spv::StorageClass::Max, 0xffffffff, 0xffffffff};
}
static unsigned getHashValue(const StageVariableLocationInfo &Val) {
return llvm::hash_combine(Val.entryPoint) ^
llvm::hash_combine(Val.location) ^ llvm::hash_combine(Val.index) ^
llvm::hash_combine(static_cast<uint32_t>(Val.sc));
}
static bool isEqual(const StageVariableLocationInfo &LHS,
const StageVariableLocationInfo &RHS) {
return LHS.entryPoint == RHS.entryPoint && LHS.sc == RHS.sc &&
LHS.location == RHS.location && LHS.index == RHS.index;
}
};
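/// A minimal usage sketch (illustrative only, not part of the original
/// source): the static members above follow the llvm::DenseMapInfo contract,
/// so this struct can serve as its own key info and duplicate detection can
/// be written as, e.g.:
///
///   llvm::DenseSet<StageVariableLocationInfo, StageVariableLocationInfo>
///       seenLocations;
///   bool isDuplicate =
///       !seenLocations.insert({entryPoint, sc, location, index}).second;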
} // end namespace spirv
} // end namespace clang
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/RelaxedPrecisionVisitor.cpp | //===--- RelaxedPrecisionVisitor.cpp - RelaxedPrecision Visitor --*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "RelaxedPrecisionVisitor.h"
#include "clang/SPIRV/AstTypeProbe.h"
#include "clang/SPIRV/SpirvBuilder.h"
namespace clang {
namespace spirv {
bool RelaxedPrecisionVisitor::visit(SpirvFunction *fn, Phase phase) {
assert(fn);
if (phase == Visitor::Phase::Init)
if (isRelaxedPrecisionType(fn->getAstReturnType(), spvOptions))
fn->setRelaxedPrecision();
return true;
}
bool RelaxedPrecisionVisitor::visit(SpirvVectorShuffle *inst) {
// The result of vector shuffle must have RelaxedPrecision if the chosen
// elements come from a vector that is RelaxedPrecision.
auto *vec1 = inst->getVec1();
auto *vec2 = inst->getVec2();
const auto vec1Type = vec1->getAstResultType();
const auto vec2Type = vec2->getAstResultType();
const bool isVec1Relaxed = isRelaxedPrecisionType(vec1Type, spvOptions);
const bool isVec2Relaxed = isRelaxedPrecisionType(vec2Type, spvOptions);
uint32_t vec1Size;
uint32_t vec2Size;
(void)isVectorType(vec1Type, nullptr, &vec1Size);
(void)isVectorType(vec2Type, nullptr, &vec2Size);
bool vec1ElemUsed = false;
bool vec2ElemUsed = false;
for (auto component : inst->getComponents()) {
if (component < vec1Size)
vec1ElemUsed = true;
else
vec2ElemUsed = true;
}
const bool onlyVec1Used = vec1ElemUsed && !vec2ElemUsed;
const bool onlyVec2Used = vec2ElemUsed && !vec1ElemUsed;
if ((onlyVec1Used && isVec1Relaxed) || (onlyVec2Used && isVec2Relaxed) ||
(vec1ElemUsed && vec2ElemUsed && isVec1Relaxed && isVec2Relaxed))
inst->setRelaxedPrecision();
return true;
}
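// Illustrative example (hypothetical HLSL, not from the original source):
//   min16float4 a; float4 b;
//   a.yx   // shuffle reads only from the relaxed 'a' -> result is relaxed
//   b.yx   // shuffle reads only from the full-precision 'b' -> not relaxed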
bool RelaxedPrecisionVisitor::visit(SpirvUnaryOp *inst) {
// For conversion operations, check the result QualType. For example: if we
// are converting from min12int to int, the result should no longer get
// RelaxedPrecision.
switch (inst->getopcode()) {
case spv::Op::OpBitcast:
case spv::Op::OpFConvert:
case spv::Op::OpSConvert:
case spv::Op::OpUConvert: {
if (isRelaxedPrecisionType(inst->getAstResultType(), spvOptions)) {
inst->setRelaxedPrecision();
}
return true;
}
default:
break;
}
// If the argument of the unary operation is RelaxedPrecision and the unary
// operation is operating on numerical values, the result is also
// RelaxedPrecision.
if (inst->getOperand()->isRelaxedPrecision() &&
isScalarOrNonStructAggregateOfNumericalTypes(
inst->getOperand()->getAstResultType()))
inst->setRelaxedPrecision();
return true;
}
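// Illustrative example (hypothetical HLSL, not from the original source):
//   min16float h;
//   float f = (float)h;   // OpFConvert: the result type is float, so the
//                         // converted value is NOT RelaxedPrecision.
//   min16float g = -h;    // negating a relaxed numeric operand stays relaxed.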
bool RelaxedPrecisionVisitor::visit(SpirvBinaryOp *inst) {
// If either argument of the binary operation is RelaxedPrecision, and the
// binary operation is operating on numerical values, the result is also
// RelaxedPrecision.
if (inst->getOperand1()->isRelaxedPrecision() &&
isScalarOrNonStructAggregateOfNumericalTypes(
inst->getOperand1()->getAstResultType()) &&
inst->getOperand2()->isRelaxedPrecision() &&
isScalarOrNonStructAggregateOfNumericalTypes(
inst->getOperand2()->getAstResultType()))
inst->setRelaxedPrecision();
return true;
}
bool RelaxedPrecisionVisitor::visit(SpirvSpecConstantUnaryOp *inst) {
// If the argument of the unary operation is RelaxedPrecision and the unary
// operation is operating on numerical values, the result is also
// RelaxedPrecision.
if (inst->getOperand()->isRelaxedPrecision() &&
isScalarOrNonStructAggregateOfNumericalTypes(
inst->getOperand()->getAstResultType()))
inst->setRelaxedPrecision();
return true;
}
bool RelaxedPrecisionVisitor::visit(SpirvSpecConstantBinaryOp *inst) {
// If either argument of the binary operation is RelaxedPrecision, and the
// binary operation is operating on numerical values, the result is also
// RelaxedPrecision.
if (inst->getOperand1()->isRelaxedPrecision() &&
isScalarOrNonStructAggregateOfNumericalTypes(
inst->getOperand1()->getAstResultType()) &&
inst->getOperand2()->isRelaxedPrecision() &&
isScalarOrNonStructAggregateOfNumericalTypes(
inst->getOperand2()->getAstResultType()))
inst->setRelaxedPrecision();
return true;
}
bool RelaxedPrecisionVisitor::visit(SpirvLoad *inst) {
// If loading from a RelaxedPrecision variable, the result is also decorated
// with RelaxedPrecision.
if (isRelaxedPrecisionType(inst->getAstResultType(), spvOptions))
inst->setRelaxedPrecision();
return true;
}
bool RelaxedPrecisionVisitor::visit(SpirvStore *inst) { return true; }
bool RelaxedPrecisionVisitor::visit(SpirvSelect *inst) {
if (inst->getTrueObject()->isRelaxedPrecision() &&
inst->getFalseObject()->isRelaxedPrecision())
inst->setRelaxedPrecision();
return true;
}
bool RelaxedPrecisionVisitor::visit(SpirvFunctionCall *inst) {
// If the return type of the function is RelaxedPrecision, we can decorate the
// result-id of the OpFunctionCall.
if (isRelaxedPrecisionType(inst->getAstResultType(), spvOptions))
inst->setRelaxedPrecision();
return true;
}
bool RelaxedPrecisionVisitor::visit(SpirvExtInst *inst) {
// If all operands of a numeric instruction in the GLSL extended instruction
// set are RelaxedPrecision, the result of the operation is also
// RelaxedPrecision.
if (inst->getInstructionSet()->getExtendedInstSetName() == "GLSL.std.450") {
const auto &operands = inst->getOperands();
if (std::all_of(operands.begin(), operands.end(), [](SpirvInstruction *op) {
return op->isRelaxedPrecision();
})) {
inst->setRelaxedPrecision();
}
}
return true;
}
bool RelaxedPrecisionVisitor::visit(SpirvCompositeInsert *inst) {
// If inserting a RelaxedPrecision object into a composite, check the type of
// the resulting composite. For example: if you are inserting a
// RelaxedPrecision object as a member into a structure, the resulting
// structure type is not RelaxedPrecision. But, if you are inserting a
// RelaxedPrecision object into a vector of RelaxedPrecision integers, the
// resulting composite *is* RelaxedPrecision.
// In short, it simply depends on the composite type.
if (isRelaxedPrecisionType(inst->getAstResultType(), spvOptions))
inst->setRelaxedPrecision();
return true;
}
bool RelaxedPrecisionVisitor::visit(SpirvCompositeExtract *inst) {
// If extracting a RelaxedPrecision object from a composite, check the type of
// the extracted object. For example: if extracting different members of a
// structure, depending on the member, you may or may not want to apply the
// RelaxedPrecision decoration.
// In short, it simply depends on the type of what you have extracted.
if (isRelaxedPrecisionType(inst->getAstResultType(), spvOptions))
inst->setRelaxedPrecision();
return true;
}
bool RelaxedPrecisionVisitor::visit(SpirvCompositeConstruct *inst) {
// When constructing a composite, only look at the type of the resulting
// composite.
if (isRelaxedPrecisionType(inst->getAstResultType(), spvOptions))
inst->setRelaxedPrecision();
return true;
}
bool RelaxedPrecisionVisitor::visit(SpirvConstantBoolean *) {
// Booleans do not have precision!
return true;
}
bool RelaxedPrecisionVisitor::visit(SpirvConstantInteger *inst) {
if (isRelaxedPrecisionType(inst->getAstResultType(), spvOptions))
inst->setRelaxedPrecision();
return true;
}
bool RelaxedPrecisionVisitor::visit(SpirvConstantFloat *inst) {
if (isRelaxedPrecisionType(inst->getAstResultType(), spvOptions))
inst->setRelaxedPrecision();
return true;
}
bool RelaxedPrecisionVisitor::visit(SpirvConstantComposite *inst) {
if (isRelaxedPrecisionType(inst->getAstResultType(), spvOptions))
inst->setRelaxedPrecision();
return true;
}
bool RelaxedPrecisionVisitor::visit(SpirvBitFieldExtract *inst) {
if (inst->getBase()->isRelaxedPrecision())
inst->setRelaxedPrecision();
return true;
}
bool RelaxedPrecisionVisitor::visit(SpirvBitFieldInsert *inst) {
if (inst->getBase()->isRelaxedPrecision())
inst->setRelaxedPrecision();
return true;
}
bool RelaxedPrecisionVisitor::visit(SpirvAtomic *inst) {
// If the original pointer is RelaxedPrecision and the operation either takes
// no value operand or operates on a value that is also RelaxedPrecision, the
// result is RelaxedPrecision.
if (inst->getPointer()->isRelaxedPrecision()) {
if (!inst->hasValue() ||
(inst->hasValue() && inst->getValue()->isRelaxedPrecision()))
inst->setRelaxedPrecision();
}
return true;
}
bool RelaxedPrecisionVisitor::visit(SpirvAccessChain *) {
// The access chain operation itself is irrelevant regarding precision.
return true;
}
bool RelaxedPrecisionVisitor::visit(SpirvFunctionParameter *inst) {
if (isRelaxedPrecisionType(inst->getAstResultType(), spvOptions))
inst->setRelaxedPrecision();
return true;
}
bool RelaxedPrecisionVisitor::visit(SpirvVariable *inst) {
if (isRelaxedPrecisionType(inst->getAstResultType(), spvOptions))
inst->setRelaxedPrecision();
return true;
}
bool RelaxedPrecisionVisitor::visit(SpirvImageOp *inst) {
// Since OpImageWrite does not have a result type, it must not be decorated
// with RelaxedPrecision.
if (inst->getopcode() == spv::Op::OpImageWrite)
return true;
// If the operation result type or the underlying image type is relaxed
// precision, the instruction can be considered relaxed precision.
if (isRelaxedPrecisionType(inst->getAstResultType(), spvOptions) ||
isRelaxedPrecisionType(inst->getImage()->getAstResultType(), spvOptions))
inst->setRelaxedPrecision();
return true;
}
} // end namespace spirv
} // namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/PervertexInputVisitor.cpp | //===--- PervertexInputVisitor.cpp ---- PerVertex Input Visitor --*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "PervertexInputVisitor.h"
#include "clang/SPIRV/AstTypeProbe.h"
#include "clang/SPIRV/SpirvFunction.h"
#include "clang/SPIRV/SpirvType.h"
#include <stack>
namespace clang {
namespace spirv {
///< For expanded variables, we need to decide where to add an extra index zero
///< for SpirvAccessChain and SpirvCompositeExtract. This comes down to three
///< access cases: 1. array element, 2. structure member, 3. vector channel.
int PervertexInputVisitor::appendIndexZeroAt(
QualType baseType, llvm::ArrayRef<uint32_t> instIndexes) {
if (instIndexes.size() == 0)
return 0;
///< 1. Array element
if (baseType.getTypePtr()->isArrayType()) {
int delta =
appendIndexZeroAt(baseType->getAsArrayTypeUnsafe()->getElementType(),
instIndexes.slice(1));
if (delta == 0)
// swizzling at an array element at the lowest level.
return 0;
else
// intermediate array/struct element access.
return 1 + delta;
}
///< 2. structure member
if (baseType.getTypePtr()->isStructureType()) {
uint32_t idx = instIndexes[0];
for (auto *f : baseType->getAs<RecordType>()->getDecl()->fields()) {
if (idx == 0) {
return 1 + appendIndexZeroAt(f->getType(), instIndexes.slice(1));
break;
}
idx--;
}
}
///< 3. vector channel.
return 0;
}
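///< Illustrative example (hypothetical, not from the original source): for a
///< nointerpolated input expanded from 'float3 v' to 'float3 v[3]', an access
///< like 'v.y' gains a leading vertex index (the zero is inserted at position
///< 0), while for 's.m.y' on an expanded struct member the zero lands right
///< after the member index, i.e. between 'm' and the channel access.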
///< Expand nointerpolation decorated variables/parameters.
///< If a variable/parameter is passed from decorated inputs, it should be
///< treated as nointerpolated too.
bool PervertexInputVisitor::expandNointerpVarAndParam(
SpirvInstruction *spvInst) {
// If there is no AST type, it means the type was already lowered from some
// expression/construct. If it required expansion, it should be done already.
if (!spvInst->hasAstResultType())
return spvInst->isNoninterpolated();
QualType type = spvInst->getAstResultType();
bool isExpanded = false;
auto typePtr = type.getTypePtr();
if (typePtr->isStructureType()) {
/// Structure
isExpanded = expandNointerpStructure(type, spvInst->isNoninterpolated());
} else if (spvInst->isNoninterpolated()) {
/// Normal type
isExpanded = true;
type = astContext.getConstantArrayType(type, llvm::APInt(32, 3),
clang::ArrayType::Normal, 0);
spvInst->setAstResultType(type);
}
return isExpanded;
}
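///< Illustrative example (hypothetical, not from the original source): a
///< nointerpolated 'float3 v' becomes 'float3 v[3]', one element per vertex
///< of the triangle, so each per-vertex value stays addressable.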
bool PervertexInputVisitor::expandNointerpStructure(QualType type,
bool isVarDecoratedInterp) {
QualType currentType = type;
auto typePtr = type.getTypePtr();
if (m_expandedStructureType.count(typePtr) > 0)
return true;
bool hasNoInterp = false;
if (typePtr->isStructureType()) {
const auto *structDecl = type->getAs<RecordType>()->getDecl();
bool structTypeNoInterp = structDecl->hasAttr<HLSLNoInterpolationAttr>();
uint32_t i = 0;
for (auto *field : structDecl->fields()) {
bool expandElem = (isVarDecoratedInterp || structTypeNoInterp ||
field->hasAttr<HLSLNoInterpolationAttr>());
if (field->getType().getTypePtr()->isStructureType())
expandNointerpStructure(field->getType(), expandElem);
else if (expandElem) {
currentType = astContext.getConstantArrayType(
field->getType(), llvm::APInt(32, 3), clang::ArrayType::Normal, 0);
field->setType(currentType);
hasNoInterp = true;
}
i++;
}
}
if (hasNoInterp)
m_expandedStructureType.insert(typePtr);
return hasNoInterp;
}
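///< Illustrative example (hypothetical HLSL, not from the original source):
///<   struct PSInput {
///<     nointerpolation float3 color : COLOR;
///<     float4 pos : SV_Position;
///<   };
///< only the decorated field is expanded: 'color' becomes 'float3 color[3]'
///< while 'pos' keeps its original type.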
///< Get the mapped operand used to replace the original operand; if no mapping
///< exists, return the operand itself.
SpirvInstruction *
PervertexInputVisitor::getMappedReplaceInstr(SpirvInstruction *i) {
auto *replacedInstr = m_instrReplaceMap.lookup(i);
if (replacedInstr)
return replacedInstr;
else
return i;
}
///< Add temp function variables for operand replacement. An original use of
///< a nointerpolated variable/parameter should be treated as an access to
///< its first element after expansion (the data at the provoking vertex).
SpirvInstruction *
PervertexInputVisitor::createFirstPerVertexVar(SpirvInstruction *base,
llvm::StringRef varName) {
auto loc = base->getSourceLocation();
auto *vtx = addFunctionTempVar(varName.str(), base->getAstResultType(), loc,
base->isPrecise());
createVertexStore(vtx, createVertexLoad(base));
return vtx;
}
SpirvInstruction *PervertexInputVisitor::createVertexAccessChain(
QualType resultType, SpirvInstruction *base,
llvm::ArrayRef<SpirvInstruction *> indexes) {
auto loc = base->getSourceLocation();
auto range = base->getSourceRange();
SpirvInstruction *instruction =
new (context) SpirvAccessChain(resultType, loc, base, indexes, range);
instruction->setStorageClass(spv::StorageClass::Function);
instruction->setLayoutRule(base->getLayoutRule());
instruction->setContainsAliasComponent(base->containsAliasComponent());
instruction->setNoninterpolated(false);
currentFunc->addToInstructionCache(instruction);
return instruction;
}
SpirvInstruction *PervertexInputVisitor::createProvokingVertexAccessChain(
SpirvInstruction *base, uint32_t index, QualType resultType) {
llvm::SmallVector<SpirvInstruction *, 1> indexes;
indexes.push_back(spirvBuilder.getConstantInt(astContext.UnsignedIntTy,
llvm::APInt(32, index)));
SpirvInstruction *instruction =
createVertexAccessChain(resultType, base, indexes);
return instruction;
}
SpirvVariable *
PervertexInputVisitor::addFunctionTempVar(llvm::StringRef varName,
QualType valueType,
SourceLocation loc, bool isPrecise) {
QualType varType = valueType;
if (varType.getTypePtr()->isPointerType())
varType = varType.getTypePtr()->getPointeeType();
SpirvVariable *var = new (context) SpirvVariable(
varType, loc, spv::StorageClass::Function, isPrecise, false, nullptr);
var->setDebugName(varName);
currentFunc->addVariable(var);
return var;
}
///< When using temp variables within a function, we need to add load/store ops.
///< TIP: A nointerpolated input or function parameter is treated as
///< input.vtx0 within the current function, but is passed as an array
///< to a function call.
SpirvInstruction *
PervertexInputVisitor::createVertexLoad(SpirvInstruction *base) {
SpirvInstruction *loadPtr = new (context)
SpirvLoad(base->getAstResultType(), base->getSourceLocation(), base,
base->getSourceRange());
loadPtr->setStorageClass(spv::StorageClass::Function);
loadPtr->setLayoutRule(base->getLayoutRule());
loadPtr->setRValue(true);
currentFunc->addToInstructionCache(loadPtr);
return loadPtr;
}
void PervertexInputVisitor::createVertexStore(SpirvInstruction *pt,
SpirvInstruction *obj) {
auto *storeInstr = new (context) SpirvStore(pt->getSourceLocation(), pt, obj);
currentFunc->addToInstructionCache(storeInstr);
}
// Don't add extra index to a simple vector/matrix elem access when base is not
// expanded. Like:
// %99 = OpAccessChain %_ptr_Function_v3bool %input %int_2 %uint_0
// %101 = OpAccessChain %_ptr_Function_bool %99 %int_1
bool PervertexInputVisitor::isNotExpandedVectorAccess(QualType baseType,
QualType resultType) {
QualType elemType = {};
return (isVectorType(baseType, &elemType) ||
isMxNMatrix(baseType, &elemType)) &&
elemType == resultType && !baseType.getTypePtr()->isArrayType();
}
bool PervertexInputVisitor::visit(SpirvModule *m, Phase phase) {
if (!m->isPerVertexInterpMode())
return false;
currentMod = m;
return true;
}
bool PervertexInputVisitor::visit(SpirvEntryPoint *ep) {
// Add variable mapping here. First function would be main.
currentFunc = ep->getEntryPoint();
/// Refine stage in/out variables
for (auto *var : currentMod->getVariables()) {
if (!var->isNoninterpolated() ||
var->getStorageClass() != spv::StorageClass::Input)
continue;
auto *stgIOLoadPtr = spirvBuilder.getPerVertexStgInput(var);
if (!isa<SpirvBinaryOp>(stgIOLoadPtr)) {
stgIOLoadPtr->setAstResultType(var->getAstResultType());
}
}
return true;
}
bool PervertexInputVisitor::visit(SpirvFunction *sf, Phase phase) {
// Add variable mapping here. First function would be main.
currentFunc = sf;
inEntryFunctionWrapper = false;
if (sf->isEntryFunctionWrapper()) {
if (phase != Phase::Done) {
inEntryFunctionWrapper = true;
return true;
}
}
if (phase == Phase::Done) {
currentFunc->addInstrCacheToFront();
} else {
// Refine variables and parameters. Add vtx0 for them.
// (Those param and var haven't been expanded at this point).
for (auto *var : currentFunc->getVariables()) {
if (!var->isNoninterpolated())
continue;
auto *vtx0 =
createProvokingVertexAccessChain(var, 0, var->getAstResultType());
m_instrReplaceMap[var] = vtx0;
}
for (auto *param : currentFunc->getParameters()) {
if (!param->isNoninterpolated() ||
param->getAstResultType().getTypePtr()->isStructureType())
continue;
auto *vtx0 =
createProvokingVertexAccessChain(param, 0, param->getAstResultType());
auto paramName = param->getDebugName().str() + "_perVertexParam0";
m_instrReplaceMap[param] = createFirstPerVertexVar(vtx0, paramName);
}
}
return true;
}
/// Spirv Instruction check and pointer replacement if needed.
bool PervertexInputVisitor::visit(SpirvVariable *inst) {
if (expandNointerpVarAndParam(inst) &&
inst->getStorageClass() == spv::StorageClass::Input)
spirvBuilder.decoratePerVertexKHR(inst, inst->getSourceLocation());
return true;
}
bool PervertexInputVisitor::visit(SpirvFunctionParameter *inst) {
expandNointerpVarAndParam(inst);
return true;
}
bool PervertexInputVisitor::visit(SpirvCompositeExtract *inst) {
if (inst->isNoninterpolated() &&
!isNotExpandedVectorAccess(inst->getComposite()->getAstResultType(),
inst->getAstResultType())) {
int idx = appendIndexZeroAt(inst->getComposite()->getAstResultType(),
inst->getIndexes());
inst->insertIndex(0, idx);
inst->setNoninterpolated(false);
}
return true;
}
bool PervertexInputVisitor::visit(SpirvAccessChain *inst) {
llvm::SmallVector<uint32_t, 4> indexes;
SpirvInstruction *zero =
spirvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 0));
auto idx = inst->getIndexes();
for (auto i : idx) {
if (isa<SpirvConstantInteger>(i)) {
indexes.push_back(
dyn_cast<SpirvConstantInteger>(i)->getValue().getZExtValue());
}
}
if (inst->isNoninterpolated() &&
!isNotExpandedVectorAccess(inst->getBase()->getAstResultType(),
inst->getAstResultType())) {
// Do not add an extra index to a vector channel access.
int idx = appendIndexZeroAt(inst->getBase()->getAstResultType(), indexes);
inst->insertIndex(zero, idx);
inst->setNoninterpolated(false);
}
return true;
}
// Only replace argument if not in entry function.
// If an expanded variable/parameter is passed to a function,
// recreate a pair of Store/Load instructions.
bool PervertexInputVisitor::visit(SpirvFunctionCall *inst) {
/// Replace each use of per-vertex inputs with its vertex-0 element within
/// functions, but pass it as an array when it reaches a function call.
if (inEntryFunctionWrapper)
return true;
/// Load/Store instructions related to this argument may have been replaced
/// with other instructions, so we need to get its original mapped variables.
unsigned argIndex = 0;
for (auto *arg : inst->getArgs()) {
auto paramVar = currentFunc->getMappedFuncParam(arg);
if (paramVar) {
if (isa<SpirvAccessChain>(paramVar)) {
auto tempVar = paramVar;
while (isa<SpirvAccessChain>(tempVar)) {
tempVar = dyn_cast<SpirvAccessChain>(tempVar)->getBase();
}
if (tempVar->isNoninterpolated()) {
/// When function parameters have a structure type, some local
/// variables may be created and mapped to stage inputs
/// in the 'src.main' block.
///
/// We use first vertex value of those non-interpolated inputs to
/// replace normal usage of those local variables in HLSL and SPIRV.
///
/// But when those variables are then used in a function call as
/// its arguments, we need to copy the values for all of the vertices
/// to the local variable. This means copying an entire array.
///
/// At this point, the original access chains to those member variables
/// have already had a zero index appended at the end to access the first
/// vertex for replacement.
///
/// Hence we need to recreate a new access chain instruction
/// and pass the argument as an array to this function call.
auto paramAccessChain = dyn_cast<SpirvAccessChain>(paramVar);
auto indexes = paramAccessChain->getIndexes();
auto elemType = astContext.getConstantArrayType(
paramAccessChain->getAstResultType(), llvm::APInt(32, 3),
clang::ArrayType::Normal, 0);
llvm::SmallVector<SpirvInstruction *, 4> indices(indexes.begin(),
indexes.end());
indices.pop_back();
paramVar = createVertexAccessChain(
elemType, paramAccessChain->getBase(), indices);
}
}
createVertexStore(arg, createVertexLoad(paramVar));
}
auto funcParam = inst->getFunction()->getParameters()[argIndex];
if (arg->isNoninterpolated()) {
/// Broadcast nointerpolated flag to each called function which uses a
/// nointerpolated variable as its functionCall parameter within a call
/// chain.
funcParam->setNoninterpolated();
}
paramCaller[funcParam].push_back(arg);
if (funcParam->isNoninterpolated()) {
/// Error check: this broadcast process goes from top to bottom, hence this
/// argument should be noninterpolated (it will be expanded) here.
/// When any matched param is noninterpolated, it means one or more
/// noninterpolated variables will be passed as an expanded array.
for (auto caller : paramCaller[funcParam])
if (!caller->isNoninterpolated()) {
emitError("Function '%0' can only use a noninterpolated variable "
"as input.",
caller->getSourceLocation())
<< inst->getFunction()->getFunctionName().data();
return false;
}
}
argIndex++;
}
currentFunc->addInstrCacheToFront();
return true;
}
} // end namespace spirv
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/SpirvEmitter.cpp | //===------- SpirvEmitter.cpp - SPIR-V Binary Code Emitter ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//
//
// This file implements a SPIR-V emitter class that takes in HLSL AST and emits
// SPIR-V binary words.
//
//===----------------------------------------------------------------------===//
#include "SpirvEmitter.h"
#include "AlignmentSizeCalculator.h"
#include "InitListHandler.h"
#include "LowerTypeVisitor.h"
#include "RawBufferMethods.h"
#include "dxc/DXIL/DxilConstants.h"
#include "dxc/HlslIntrinsicOp.h"
#include "spirv-tools/optimizer.hpp"
#include "clang/AST/HlslTypes.h"
#include "clang/AST/ParentMap.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Type.h"
#include "clang/SPIRV/AstTypeProbe.h"
#include "clang/SPIRV/String.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Casting.h"
#ifdef SUPPORT_QUERY_GIT_COMMIT_INFO
#include "clang/Basic/Version.h"
#else
namespace clang {
uint32_t getGitCommitCount() { return 0; }
const char *getGitCommitHash() { return "<unknown-hash>"; }
} // namespace clang
#endif // SUPPORT_QUERY_GIT_COMMIT_INFO
namespace clang {
namespace spirv {
using spvtools::opt::DescriptorSetAndBinding;
namespace {
// Returns true if the given decl is an implicit variable declaration inside the
// "vk" namespace.
bool isImplicitVarDeclInVkNamespace(const Decl *decl) {
if (!decl)
return false;
if (auto *varDecl = dyn_cast<VarDecl>(decl)) {
// Check whether it is implicitly defined.
if (!decl->isImplicit())
return false;
if (auto *nsDecl = dyn_cast<NamespaceDecl>(varDecl->getDeclContext()))
if (nsDecl->getName().equals("vk"))
return true;
}
return false;
}
// Returns true if the given decl has the given semantic.
bool hasSemantic(const DeclaratorDecl *decl,
hlsl::DXIL::SemanticKind semanticKind) {
using namespace hlsl;
for (auto *annotation : decl->getUnusualAnnotations()) {
if (auto *semanticDecl = dyn_cast<SemanticDecl>(annotation)) {
llvm::StringRef semanticName;
uint32_t semanticIndex = 0;
Semantic::DecomposeNameAndIndex(semanticDecl->SemanticName, &semanticName,
&semanticIndex);
const auto *semantic = Semantic::GetByName(semanticName);
if (semantic->GetKind() == semanticKind)
return true;
}
}
return false;
}
const ParmVarDecl *patchConstFuncTakesHullOutputPatch(FunctionDecl *pcf) {
for (const auto *param : pcf->parameters())
if (hlsl::IsHLSLOutputPatchType(param->getType()))
return param;
return nullptr;
}
inline bool isSpirvMatrixOp(spv::Op opcode) {
return opcode == spv::Op::OpMatrixTimesMatrix ||
opcode == spv::Op::OpMatrixTimesVector ||
opcode == spv::Op::OpMatrixTimesScalar;
}
/// If expr is a (RW)StructuredBuffer.Load(), returns the object and writes
/// the index. Otherwise, returns nullptr.
// TODO: The following doesn't handle Load(int, int) yet. And it is basically a
// duplicate of doCXXMemberCallExpr.
const Expr *isStructuredBufferLoad(const Expr *expr, const Expr **index) {
using namespace hlsl;
if (const auto *indexing = dyn_cast<CXXMemberCallExpr>(expr)) {
const auto *callee = indexing->getDirectCallee();
uint32_t opcode = static_cast<uint32_t>(IntrinsicOp::Num_Intrinsics);
llvm::StringRef group;
if (GetIntrinsicOp(callee, opcode, group)) {
if (static_cast<IntrinsicOp>(opcode) == IntrinsicOp::MOP_Load) {
const auto *object = indexing->getImplicitObjectArgument();
if (isStructuredBuffer(object->getType())) {
*index = indexing->getArg(0);
return indexing->getImplicitObjectArgument();
}
}
}
}
return nullptr;
}
/// Returns true if
/// * the given expr is a DeclRefExpr referencing a kind of structured or byte
///   buffer and it is a non-alias one, or
/// * the given expr is a CallExpr returning a kind of structured or byte
/// buffer.
/// * the given expr is an ArraySubscriptExpr referencing a kind of structured
/// or byte buffer.
///
/// Note: legalization specific code
bool isReferencingNonAliasStructuredOrByteBuffer(const Expr *expr) {
expr = expr->IgnoreParenCasts();
if (const auto *declRefExpr = dyn_cast<DeclRefExpr>(expr)) {
if (const auto *varDecl = dyn_cast<VarDecl>(declRefExpr->getFoundDecl()))
if (isAKindOfStructuredOrByteBuffer(varDecl->getType()))
return SpirvEmitter::isExternalVar(varDecl);
} else if (const auto *callExpr = dyn_cast<CallExpr>(expr)) {
if (isAKindOfStructuredOrByteBuffer(callExpr->getType()))
return true;
} else if (isa<ArraySubscriptExpr>(expr)) {
return isAKindOfStructuredOrByteBuffer(expr->getType());
}
return false;
}
/// Translates atomic HLSL opcodes into the equivalent SPIR-V opcode.
spv::Op translateAtomicHlslOpcodeToSpirvOpcode(hlsl::IntrinsicOp opcode) {
using namespace hlsl;
using namespace spv;
switch (opcode) {
case IntrinsicOp::IOP_InterlockedAdd:
case IntrinsicOp::MOP_InterlockedAdd:
return Op::OpAtomicIAdd;
case IntrinsicOp::IOP_InterlockedAnd:
case IntrinsicOp::MOP_InterlockedAnd:
return Op::OpAtomicAnd;
case IntrinsicOp::IOP_InterlockedOr:
case IntrinsicOp::MOP_InterlockedOr:
return Op::OpAtomicOr;
case IntrinsicOp::IOP_InterlockedXor:
case IntrinsicOp::MOP_InterlockedXor:
return Op::OpAtomicXor;
case IntrinsicOp::IOP_InterlockedUMax:
case IntrinsicOp::MOP_InterlockedUMax:
return Op::OpAtomicUMax;
case IntrinsicOp::IOP_InterlockedUMin:
case IntrinsicOp::MOP_InterlockedUMin:
return Op::OpAtomicUMin;
case IntrinsicOp::IOP_InterlockedMax:
case IntrinsicOp::MOP_InterlockedMax:
return Op::OpAtomicSMax;
case IntrinsicOp::IOP_InterlockedMin:
case IntrinsicOp::MOP_InterlockedMin:
return Op::OpAtomicSMin;
case IntrinsicOp::IOP_InterlockedExchange:
case IntrinsicOp::MOP_InterlockedExchange:
return Op::OpAtomicExchange;
default:
// Only atomic opcodes are relevant.
break;
}
assert(false && "unimplemented hlsl intrinsic opcode");
return Op::Max;
}
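// Illustrative example (hypothetical HLSL, not from the original source):
//   InterlockedAdd(buf[0], 1);     // -> OpAtomicIAdd
//   InterlockedMax(sharedVal, v);  // -> OpAtomicSMax for signed operands;
//                                  //    the unsigned intrinsic variant maps
//                                  //    to OpAtomicUMax instead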
// Returns true if the given opcode is an accepted binary opcode in
// OpSpecConstantOp.
bool isAcceptedSpecConstantBinaryOp(spv::Op op) {
switch (op) {
case spv::Op::OpIAdd:
case spv::Op::OpISub:
case spv::Op::OpIMul:
case spv::Op::OpUDiv:
case spv::Op::OpSDiv:
case spv::Op::OpUMod:
case spv::Op::OpSRem:
case spv::Op::OpSMod:
case spv::Op::OpShiftRightLogical:
case spv::Op::OpShiftRightArithmetic:
case spv::Op::OpShiftLeftLogical:
case spv::Op::OpBitwiseOr:
case spv::Op::OpBitwiseXor:
case spv::Op::OpBitwiseAnd:
case spv::Op::OpVectorShuffle:
case spv::Op::OpCompositeExtract:
case spv::Op::OpCompositeInsert:
case spv::Op::OpLogicalOr:
case spv::Op::OpLogicalAnd:
case spv::Op::OpLogicalNot:
case spv::Op::OpLogicalEqual:
case spv::Op::OpLogicalNotEqual:
case spv::Op::OpIEqual:
case spv::Op::OpINotEqual:
case spv::Op::OpULessThan:
case spv::Op::OpSLessThan:
case spv::Op::OpUGreaterThan:
case spv::Op::OpSGreaterThan:
case spv::Op::OpULessThanEqual:
case spv::Op::OpSLessThanEqual:
case spv::Op::OpUGreaterThanEqual:
case spv::Op::OpSGreaterThanEqual:
return true;
default:
// Accepted binary opcodes return true. Anything else is false.
return false;
}
return false;
}
/// Returns true if the given expression is an accepted initializer for a spec
/// constant.
bool isAcceptedSpecConstantInit(const Expr *init, ASTContext &astContext) {
// Allow numeric casts
init = init->IgnoreParenCasts();
if (isa<CXXBoolLiteralExpr>(init) || isa<IntegerLiteral>(init) ||
isa<FloatingLiteral>(init))
return true;
// Allow the minus operator which is used to specify negative values
if (const auto *unaryOp = dyn_cast<UnaryOperator>(init))
return unaryOp->getOpcode() == UO_Minus &&
isAcceptedSpecConstantInit(unaryOp->getSubExpr(), astContext);
// Allow values that can be evaluated to const.
if (init->isEvaluatable(astContext)) {
return true;
}
return false;
}
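// Illustrative example (hypothetical HLSL, not from the original source):
//   [[vk::constant_id(0)]] const int kCount = -4;    // accepted: unary minus
//                                                    // over an integer literal
//   [[vk::constant_id(1)]] const bool kFlag = true;  // accepted: bool literal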
/// Returns true if the given function parameter can act as shader stage
/// input parameter.
inline bool canActAsInParmVar(const ParmVarDecl *param) {
// If the parameter has no in/out/inout attribute, it is defaulted to
// an in parameter.
return !param->hasAttr<HLSLOutAttr>() &&
// GS output streams are marked as inout, but it should not be
// used as in parameter.
!hlsl::IsHLSLStreamOutputType(param->getType());
}
/// Returns true if the given function parameter can act as shader stage
/// output parameter.
inline bool canActAsOutParmVar(const ParmVarDecl *param) {
return param->hasAttr<HLSLOutAttr>() || param->hasAttr<HLSLInOutAttr>() ||
hlsl::IsHLSLRayQueryType(param->getType());
}
/// Returns true if the given expression is of builtin type and can be evaluated
/// to a constant zero. Returns false otherwise.
inline bool evaluatesToConstZero(const Expr *expr, ASTContext &astContext) {
const auto type = expr->getType();
if (!type->isBuiltinType())
return false;
Expr::EvalResult evalResult;
if (expr->EvaluateAsRValue(evalResult, astContext) &&
!evalResult.HasSideEffects) {
const auto &val = evalResult.Val;
return ((type->isBooleanType() && !val.getInt().getBoolValue()) ||
(type->isIntegerType() && !val.getInt().getBoolValue()) ||
(type->isFloatingType() && val.getFloat().isZero()));
}
return false;
}
/// Returns the real definition of the callee of the given CallExpr.
///
/// If we are calling a forward-declared function, callee will be the
/// FunctionDecl for the forward-declared function, not the actual
/// definition. The forward-declaration and definition are two completely
/// different AST nodes.
inline const FunctionDecl *getCalleeDefinition(const CallExpr *expr) {
const auto *callee = expr->getDirectCallee();
if (callee->isThisDeclarationADefinition())
return callee;
// We need to update callee to the actual definition here
if (!callee->isDefined(callee))
return nullptr;
return callee;
}
/// Returns the referenced definition. The given expr is expected to be a
/// DeclRefExpr or CallExpr after ignoring casts. Returns nullptr otherwise.
const DeclaratorDecl *getReferencedDef(const Expr *expr) {
if (!expr)
return nullptr;
expr = expr->IgnoreParenCasts();
while (const auto *arraySubscriptExpr = dyn_cast<ArraySubscriptExpr>(expr)) {
expr = arraySubscriptExpr->getBase();
expr = expr->IgnoreParenCasts();
}
if (const auto *declRefExpr = dyn_cast<DeclRefExpr>(expr)) {
return dyn_cast_or_null<DeclaratorDecl>(declRefExpr->getDecl());
}
if (const auto *callExpr = dyn_cast<CallExpr>(expr)) {
return getCalleeDefinition(callExpr);
}
return nullptr;
}
/// Returns the number of base classes if this type is a derived class/struct.
/// Returns zero otherwise.
inline uint32_t getNumBaseClasses(QualType type) {
if (const auto *cxxDecl = type->getAsCXXRecordDecl())
return cxxDecl->getNumBases();
return 0;
}
/// Gets the index sequence of casting a derived object to a base object by
/// following the cast chain.
void getBaseClassIndices(const CastExpr *expr,
llvm::SmallVectorImpl<uint32_t> *indices) {
assert(expr->getCastKind() == CK_UncheckedDerivedToBase ||
expr->getCastKind() == CK_HLSLDerivedToBase);
indices->clear();
QualType derivedType = expr->getSubExpr()->getType();
// There are two types of UncheckedDerivedToBase/HLSLDerivedToBase casts:
//
// The first is when a derived object tries to access a member in the base.
// For example: derived.base_member.
// ImplicitCastExpr 'Base' lvalue <UncheckedDerivedToBase (Base)>
// `-DeclRefExpr 'Derived' lvalue Var 0x1f0d9bb2890 'derived' 'Derived'
//
// The second is when a pointer to the derived class is used to access members
// or methods of the base. There are currently no pointers in HLSL, but the
// method definitions can use the "this" pointer.
// For example:
// class Base { float value; };
// class Derived : Base {
// float4 getBaseValue() { return value; }
// };
//
// In this example, the 'this' pointer (pointing to Derived) is used inside
// 'getBaseValue', which is then cast to a Base pointer:
//
// ImplicitCastExpr 'Base *' <UncheckedDerivedToBase (Base)>
// `-CXXThisExpr 'Derived *' this
//
// Therefore in order to obtain the derivedDecl below, we must make sure that
// we handle the second case too by using the pointee type.
if (derivedType->isPointerType())
derivedType = derivedType->getPointeeType();
const auto *derivedDecl = derivedType->getAsCXXRecordDecl();
// Go through the base cast chain: for each of the derived to base cast, find
// the index of the base in question in the derived's bases.
for (auto pathIt = expr->path_begin(), pathIe = expr->path_end();
pathIt != pathIe; ++pathIt) {
// The type of the base in question
const auto baseType = (*pathIt)->getType();
uint32_t index = 0;
for (auto baseIt = derivedDecl->bases_begin(),
baseIe = derivedDecl->bases_end();
baseIt != baseIe; ++baseIt, ++index)
if (baseIt->getType() == baseType) {
indices->push_back(index);
break;
}
assert(index < derivedDecl->getNumBases());
// Continue to the next base in the chain
derivedType = baseType;
if (derivedType->isPointerType())
derivedType = derivedType->getPointeeType();
derivedDecl = derivedType->getAsCXXRecordDecl();
}
}
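// Illustrative example (hypothetical, not from the original source): given
//   struct A {}; struct B {}; struct C : A, B {}; struct D : C {};
// casting a D object to B walks the path D -> C -> B and collects {0, 1}:
// C is base #0 of D, and B is base #1 of C.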
std::string getNamespacePrefix(const Decl *decl) {
std::string nsPrefix = "";
const DeclContext *dc = decl->getDeclContext();
while (dc && !dc->isTranslationUnit()) {
if (const NamespaceDecl *ns = dyn_cast<NamespaceDecl>(dc)) {
if (!ns->isAnonymousNamespace()) {
nsPrefix = ns->getName().str() + "::" + nsPrefix;
}
}
dc = dc->getParent();
}
return nsPrefix;
}
std::string getFnName(const FunctionDecl *fn) {
// Prefix the function name with the struct name if necessary
std::string classOrStructName = "";
if (const auto *memberFn = dyn_cast<CXXMethodDecl>(fn))
if (const auto *st = dyn_cast<CXXRecordDecl>(memberFn->getDeclContext()))
classOrStructName = st->getName().str() + ".";
return getNamespacePrefix(fn) + classOrStructName +
getFunctionOrOperatorName(fn, false);
}
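// Illustrative example (hypothetical, not from the original source): for
//   namespace ns { struct S { float f(); }; }
// the resulting debug name for S::f would be "ns::S.f".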
bool isMemoryObjectDeclaration(SpirvInstruction *inst) {
return isa<SpirvVariable>(inst) || isa<SpirvFunctionParameter>(inst);
}
// Returns a pair of the descriptor set and the binding that does not have
// bound Texture or Sampler.
DescriptorSetAndBinding getDSetBindingWithoutTextureOrSampler(
const llvm::SmallVectorImpl<ResourceInfoToCombineSampledImage>
&resourceInfoForSampledImages) {
const DescriptorSetAndBinding kNotFound = {
std::numeric_limits<uint32_t>::max(),
std::numeric_limits<uint32_t>::max()};
if (resourceInfoForSampledImages.empty()) {
return kNotFound;
}
typedef uint8_t TextureAndSamplerExistence;
const TextureAndSamplerExistence kTextureConfirmed = 1 << 0;
const TextureAndSamplerExistence kSamplerConfirmed = 1 << 1;
llvm::DenseMap<std::pair<uint32_t, uint32_t>, TextureAndSamplerExistence>
dsetBindingsToTextureSamplerExistence;
for (const auto &itr : resourceInfoForSampledImages) {
auto dsetBinding = std::make_pair(itr.descriptorSet, itr.binding);
TextureAndSamplerExistence status = 0;
if (isTexture(itr.type))
status = kTextureConfirmed;
if (isSampler(itr.type))
status = kSamplerConfirmed;
auto existenceItr = dsetBindingsToTextureSamplerExistence.find(dsetBinding);
if (existenceItr == dsetBindingsToTextureSamplerExistence.end()) {
dsetBindingsToTextureSamplerExistence[dsetBinding] = status;
} else {
existenceItr->second = existenceItr->second | status;
}
}
for (const auto &itr : dsetBindingsToTextureSamplerExistence) {
if (itr.second != (kTextureConfirmed | kSamplerConfirmed))
return {itr.first.first, itr.first.second};
}
return kNotFound;
}
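// Illustrative example (hypothetical HLSL, not from the original source):
//   [[vk::combinedImageSampler]] [[vk::binding(0)]] Texture2D tex;
//   [[vk::combinedImageSampler]] [[vk::binding(0)]] SamplerState samp;
// both land on the same (set, binding), so the pair is complete; a texture
// with no matching sampler at its (set, binding) would be returned here.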
// Collects pairs of the descriptor set and the binding to combine
// corresponding Texture and Sampler into the sampled image.
std::vector<DescriptorSetAndBinding> collectDSetBindingsToCombineSampledImage(
const llvm::SmallVectorImpl<ResourceInfoToCombineSampledImage>
&resourceInfoForSampledImages) {
std::vector<DescriptorSetAndBinding> dsetBindings;
for (const auto &itr : resourceInfoForSampledImages) {
dsetBindings.push_back({itr.descriptorSet, itr.binding});
}
return dsetBindings;
}
// Returns a scalar unsigned integer type or a vector of them or a matrix of
// them depending on the scalar/vector/matrix type of boolType. The element
// type of boolType must be BuiltinType::Bool type.
QualType getUintTypeForBool(ASTContext &astContext,
CompilerInstance &theCompilerInstance,
QualType boolType) {
assert(isBoolOrVecMatOfBoolType(boolType));
uint32_t vecSize = 1, numRows = 0, numCols = 0;
QualType uintType = astContext.UnsignedIntTy;
if (isScalarType(boolType) || isVectorType(boolType, nullptr, &vecSize)) {
if (vecSize == 1)
return uintType;
else
return astContext.getExtVectorType(uintType, vecSize);
} else {
const bool isMat = isMxNMatrix(boolType, nullptr, &numRows, &numCols);
assert(isMat);
(void)isMat;
const clang::Type *type = boolType.getCanonicalType().getTypePtr();
const RecordType *RT = cast<RecordType>(type);
const ClassTemplateSpecializationDecl *templateSpecDecl =
cast<ClassTemplateSpecializationDecl>(RT->getDecl());
ClassTemplateDecl *templateDecl =
templateSpecDecl->getSpecializedTemplate();
return getHLSLMatrixType(astContext, theCompilerInstance.getSema(),
templateDecl, uintType, numRows, numCols);
}
return QualType();
}
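// Illustrative mapping (not from the original source):
//   bool    -> uint
//   bool3   -> uint3
//   bool2x2 -> a 2x2 HLSL matrix with uint elements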
bool isVkRawBufferLoadIntrinsic(const clang::FunctionDecl *FD) {
if (!FD->getName().equals("RawBufferLoad"))
return false;
if (auto *nsDecl = dyn_cast<NamespaceDecl>(FD->getDeclContext()))
if (!nsDecl->getName().equals("vk"))
return false;
return true;
}
// Takes an AST member type, and determines its index in the equivalent SPIR-V
// struct type. This is required as the struct layout might change between the
// AST representation and SPIR-V representation.
uint32_t getFieldIndexInStruct(const StructType *spirvStructType,
const QualType &astStructType,
const FieldDecl *fieldDecl) {
assert(fieldDecl);
const uint32_t indexAST =
getNumBaseClasses(astStructType) + fieldDecl->getFieldIndex();
const auto &fields = spirvStructType->getFields();
assert(indexAST < fields.size());
return fields[indexAST].fieldIndex;
}
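// Illustrative example (hypothetical, not from the original source): for
//   struct S { uint a : 8; uint b : 8; float c; };
// 'a' and 'b' are squashed into one 32-bit field in the SPIR-V struct, so the
// AST field index of 'c' (2) maps to SPIR-V field index 1.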
// Takes an AST struct type, and lowers it to the equivalent SPIR-V type.
const StructType *lowerStructType(const SpirvCodeGenOptions &spirvOptions,
LowerTypeVisitor &lowerTypeVisitor,
const QualType &structType) {
// If we are accessing a derived struct, we need to account for the number
// of base structs, since they are placed as fields at the beginning of the
// derived struct.
auto baseType = structType;
if (baseType->isPointerType()) {
baseType = baseType->getPointeeType();
}
// The AST type index is not representative of the SPIR-V type index
// because we might squash some fields (bitfields, for example).
// What we need is to match each AST node with the squashed field and then
// determine the real index.
const SpirvType *spvType = lowerTypeVisitor.lowerType(
baseType, spirvOptions.sBufferLayoutRule, llvm::None, SourceLocation());
const StructType *output = dyn_cast<StructType>(spvType);
assert(output != nullptr);
return output;
}
} // namespace
SpirvEmitter::SpirvEmitter(CompilerInstance &ci)
: theCompilerInstance(ci), astContext(ci.getASTContext()),
diags(ci.getDiagnostics()),
spirvOptions(ci.getCodeGenOpts().SpirvOptions),
hlslEntryFunctionName(ci.getCodeGenOpts().HLSLEntryFunction),
spvContext(), featureManager(diags, spirvOptions),
spvBuilder(astContext, spvContext, spirvOptions, featureManager),
declIdMapper(astContext, spvContext, spvBuilder, *this, featureManager,
spirvOptions),
constEvaluator(astContext, spvBuilder), entryFunction(nullptr),
curFunction(nullptr), curThis(nullptr), seenPushConstantAt(),
isSpecConstantMode(false), needsLegalization(false),
beforeHlslLegalization(false), mainSourceFile(nullptr) {
// Get ShaderModel from command line hlsl profile option.
const hlsl::ShaderModel *shaderModel =
hlsl::ShaderModel::GetByName(ci.getCodeGenOpts().HLSLProfile.c_str());
if (shaderModel->GetKind() == hlsl::ShaderModel::Kind::Invalid)
emitError("unknown shader module: %0", {}) << shaderModel->GetName();
if (spirvOptions.invertY && !shaderModel->IsVS() && !shaderModel->IsDS() &&
!shaderModel->IsGS() && !shaderModel->IsMS())
emitError("-fvk-invert-y can only be used in VS/DS/GS/MS", {});
if (spirvOptions.useGlLayout && spirvOptions.useDxLayout)
emitError("cannot specify both -fvk-use-dx-layout and -fvk-use-gl-layout",
{});
// Set shader model kind and hlsl major/minor version.
spvContext.setCurrentShaderModelKind(shaderModel->GetKind());
spvContext.setMajorVersion(shaderModel->GetMajor());
spvContext.setMinorVersion(shaderModel->GetMinor());
spirvOptions.signaturePacking =
ci.getCodeGenOpts().HLSLSignaturePackingStrategy ==
(unsigned)hlsl::DXIL::PackingStrategy::Optimized;
if (spirvOptions.useDxLayout) {
spirvOptions.cBufferLayoutRule = SpirvLayoutRule::FxcCTBuffer;
spirvOptions.tBufferLayoutRule = SpirvLayoutRule::FxcCTBuffer;
spirvOptions.sBufferLayoutRule = SpirvLayoutRule::FxcSBuffer;
spirvOptions.ampPayloadLayoutRule = SpirvLayoutRule::FxcSBuffer;
} else if (spirvOptions.useGlLayout) {
spirvOptions.cBufferLayoutRule = SpirvLayoutRule::GLSLStd140;
spirvOptions.tBufferLayoutRule = SpirvLayoutRule::GLSLStd430;
spirvOptions.sBufferLayoutRule = SpirvLayoutRule::GLSLStd430;
spirvOptions.ampPayloadLayoutRule = SpirvLayoutRule::GLSLStd430;
} else if (spirvOptions.useScalarLayout) {
spirvOptions.cBufferLayoutRule = SpirvLayoutRule::Scalar;
spirvOptions.tBufferLayoutRule = SpirvLayoutRule::Scalar;
spirvOptions.sBufferLayoutRule = SpirvLayoutRule::Scalar;
spirvOptions.ampPayloadLayoutRule = SpirvLayoutRule::Scalar;
} else {
spirvOptions.cBufferLayoutRule = SpirvLayoutRule::RelaxedGLSLStd140;
spirvOptions.tBufferLayoutRule = SpirvLayoutRule::RelaxedGLSLStd430;
spirvOptions.sBufferLayoutRule = SpirvLayoutRule::RelaxedGLSLStd430;
spirvOptions.ampPayloadLayoutRule = SpirvLayoutRule::RelaxedGLSLStd430;
}
// Set shader module version, source file name, and source file content (if
// needed).
llvm::StringRef source = "";
std::vector<llvm::StringRef> fileNames;
const auto &inputFiles = ci.getFrontendOpts().Inputs;
// File name
if (spirvOptions.debugInfoFile && !inputFiles.empty()) {
for (const auto &inputFile : inputFiles) {
fileNames.push_back(inputFile.getFile());
}
}
// Source code
if (spirvOptions.debugInfoSource) {
const auto &sm = ci.getSourceManager();
const llvm::MemoryBuffer *mainFile =
sm.getBuffer(sm.getMainFileID(), SourceLocation());
source = StringRef(mainFile->getBufferStart(), mainFile->getBufferSize());
}
mainSourceFile = spvBuilder.setDebugSource(spvContext.getMajorVersion(),
spvContext.getMinorVersion(),
fileNames, source);
// Rich DebugInfo DebugSource
if (spirvOptions.debugInfoRich) {
auto *dbgSrc =
spvBuilder.createDebugSource(mainSourceFile->getString(), source);
// spvContext.getDebugInfo().insert() inserts {string key, RichDebugInfo}
// pair and returns {{string key, RichDebugInfo}, true /*Success*/}.
// spvContext.getDebugInfo().insert().first->second is a RichDebugInfo.
auto *richDebugInfo =
&spvContext.getDebugInfo()
.insert(
{mainSourceFile->getString(),
RichDebugInfo(dbgSrc,
spvBuilder.createDebugCompilationUnit(dbgSrc))})
.first->second;
spvContext.pushDebugLexicalScope(richDebugInfo,
richDebugInfo->scopeStack.back());
}
if (spirvOptions.debugInfoTool && !spirvOptions.debugInfoVulkan &&
featureManager.isTargetEnvVulkan1p1OrAbove()) {
// Emit OpModuleProcessed to indicate the commit information.
std::string commitHash =
std::string("dxc-commit-hash: ") + clang::getGitCommitHash();
spvBuilder.addModuleProcessed(commitHash);
// Emit OpModuleProcessed to indicate the command line options that were
// used to generate this module.
if (!spirvOptions.inputFile.empty() || !spirvOptions.clOptions.empty()) {
// Using this format: "dxc-cl-option: XXXXXX"
std::string clOptionStr =
"dxc-cl-option: " + spirvOptions.inputFile + spirvOptions.clOptions;
spvBuilder.addModuleProcessed(clOptionStr);
}
}
}
std::vector<SpirvVariable *>
SpirvEmitter::getInterfacesForEntryPoint(SpirvFunction *entryPoint) {
auto stageVars = declIdMapper.collectStageVars(entryPoint);
if (!featureManager.isTargetEnvVulkan1p1Spirv1p4OrAbove())
return stageVars;
// In SPIR-V 1.4 or above, we must include global variables in the 'Interface'
// operands of OpEntryPoint. SpirvModule keeps all global variables, but some
// of them can be duplicated with stage variables kept by declIdMapper. Since
// declIdMapper keeps the mapping between variables with Input or Output
// storage class and their storage class, we have to rely on
// declIdMapper.collectStageVars() to collect them.
llvm::SetVector<SpirvVariable *> interfaces(stageVars.begin(),
stageVars.end());
for (auto *moduleVar : spvBuilder.getModule()->getVariables()) {
if (moduleVar->getStorageClass() != spv::StorageClass::Input &&
moduleVar->getStorageClass() != spv::StorageClass::Output) {
if (auto *varEntry =
declIdMapper.getRayTracingStageVarEntryFunction(moduleVar)) {
if (varEntry != entryPoint)
continue;
}
interfaces.insert(moduleVar);
}
}
std::vector<SpirvVariable *> interfacesInVector;
interfacesInVector.reserve(interfaces.size());
for (auto *interface : interfaces) {
interfacesInVector.push_back(interface);
}
return interfacesInVector;
}
void SpirvEmitter::beginInvocationInterlock(SourceLocation loc,
SourceRange range) {
spvBuilder.addExecutionMode(
entryFunction, declIdMapper.getInterlockExecutionMode(), {}, loc);
spvBuilder.createBeginInvocationInterlockEXT(loc, range);
needsLegalization = true;
}
llvm::StringRef SpirvEmitter::getEntryPointName(const FunctionInfo *entryInfo) {
llvm::StringRef entrypointName = entryInfo->funcDecl->getName();
// If this is the -E HLSL entrypoint and -fspv-entrypoint-name was set,
// rename the SPIR-V entrypoint.
if (entrypointName == hlslEntryFunctionName &&
!spirvOptions.entrypointName.empty()) {
return spirvOptions.entrypointName;
}
return entrypointName;
}
void SpirvEmitter::HandleTranslationUnit(ASTContext &context) {
// Stop translating if there are errors in previous compilation stages.
if (context.getDiagnostics().hasErrorOccurred())
return;
if (spirvOptions.debugInfoRich && !spirvOptions.debugInfoVulkan) {
emitWarning(
"Member functions will not be linked to their class in the "
"debug information. Prefer using -fspv-debug=vulkan-with-source. "
"See https://github.com/KhronosGroup/SPIRV-Registry/issues/203",
{});
}
TranslationUnitDecl *tu = context.getTranslationUnitDecl();
uint32_t numEntryPoints = 0;
// The entry function is the seed of the queue.
for (auto *decl : tu->decls()) {
if (auto *funcDecl = dyn_cast<FunctionDecl>(decl)) {
if (spvContext.isLib()) {
if (const auto *shaderAttr = funcDecl->getAttr<HLSLShaderAttr>()) {
// If we are compiling as a library then add everything that has a
// ShaderAttr.
addFunctionToWorkQueue(getShaderModelKind(shaderAttr->getStage()),
funcDecl, /*isEntryFunction*/ true);
numEntryPoints++;
} else if (funcDecl->getAttr<HLSLExportAttr>()) {
addFunctionToWorkQueue(spvContext.getCurrentShaderModelKind(),
funcDecl, /*isEntryFunction*/ false);
}
} else {
const bool isPrototype = !funcDecl->isThisDeclarationADefinition();
if (funcDecl->getName() == hlslEntryFunctionName && !isPrototype) {
addFunctionToWorkQueue(spvContext.getCurrentShaderModelKind(),
funcDecl, /*isEntryFunction*/ true);
numEntryPoints++;
}
}
} else {
doDecl(decl);
}
if (context.getDiagnostics().hasErrorOccurred())
return;
}
// Translate all functions reachable from the entry function.
// The queue can grow in the meanwhile; so need to keep evaluating
// workQueue.size().
for (uint32_t i = 0; i < workQueue.size(); ++i) {
const FunctionInfo *curEntryOrCallee = workQueue[i];
spvContext.setCurrentShaderModelKind(curEntryOrCallee->shaderModelKind);
doDecl(curEntryOrCallee->funcDecl);
if (context.getDiagnostics().hasErrorOccurred())
return;
}
// Addressing and memory model are required in a valid SPIR-V module.
// It may be promoted based on features used by this shader.
spvBuilder.setMemoryModel(spv::AddressingModel::Logical,
spv::MemoryModel::GLSL450);
// Even though the 'workQueue' grows due to the above loop, the first
// 'numEntryPoints' entries in the 'workQueue' are the ones with the HLSL
// 'shader' attribute, and must therefore be entry functions.
assert(numEntryPoints <= workQueue.size());
for (uint32_t i = 0; i < numEntryPoints; ++i) {
    // TODO: assign specific StageVars w.r.t. entry point
const FunctionInfo *entryInfo = workQueue[i];
assert(entryInfo->isEntryFunction);
spvBuilder.addEntryPoint(
getSpirvShaderStage(
entryInfo->shaderModelKind,
featureManager.isExtensionEnabled(Extension::EXT_mesh_shader)),
entryInfo->entryFunction, getEntryPointName(entryInfo),
getInterfacesForEntryPoint(entryInfo->entryFunction));
}
// Add Location decorations to stage input/output variables.
if (!declIdMapper.decorateStageIOLocations())
return;
// Add descriptor set and binding decorations to resource variables.
if (!declIdMapper.decorateResourceBindings())
return;
  // Add Coherent decorations to resource variables.
if (!declIdMapper.decorateResourceCoherent())
return;
// Add source instruction(s)
if ((spirvOptions.debugInfoSource || spirvOptions.debugInfoFile) &&
!spirvOptions.debugInfoVulkan) {
    std::vector<llvm::StringRef> fileNames;
const auto &sm = context.getSourceManager();
// Add each include file from preprocessor output
for (unsigned int i = 0; i < sm.getNumLineTableFilenames(); i++) {
fileNames.push_back(sm.getLineTableFilename(i));
}
spvBuilder.setDebugSource(spvContext.getMajorVersion(),
spvContext.getMinorVersion(), fileNames);
}
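  // Honor the maximal-reconvergence option (assumption: driven by
  // -fspv-enable-maximal-reconvergence) by tagging the entry point with the
  // MaximallyReconvergesKHR execution mode.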
if (spirvOptions.enableMaximalReconvergence) {
spvBuilder.addExecutionMode(entryFunction,
spv::ExecutionMode::MaximallyReconvergesKHR, {},
SourceLocation());
}
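  // Map the requested denormal behavior (assumption: driven by the dxc
  // -denorm option) onto SPIR-V float-controls execution modes for 32-bit
  // floats, e.g. (illustrative command lines):
  //   dxc -spirv -denorm preserve ... => DenormPreserve 32
  //   dxc -spirv -denorm ftz ...      => DenormFlushToZero 32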
llvm::StringRef denormMode = spirvOptions.floatDenormalMode;
if (!denormMode.empty()) {
if (denormMode.equals_lower("preserve")) {
spvBuilder.addExecutionMode(entryFunction,
spv::ExecutionMode::DenormPreserve, {32}, {});
} else if (denormMode.equals_lower("ftz")) {
spvBuilder.addExecutionMode(
entryFunction, spv::ExecutionMode::DenormFlushToZero, {32}, {});
} else if (denormMode.equals_lower("any")) {
// Do nothing. Since any behavior is allowed, we could optionally choose
// to translate to DenormPreserve or DenormFlushToZero if one was known to
// be more performant on most platforms.
} else {
assert(false && "unsupported denorm value");
}
}
// Output the constructed module.
std::vector<uint32_t> m = spvBuilder.takeModule();
if (context.getDiagnostics().hasErrorOccurred())
return;
if (!UpgradeToVulkanMemoryModelIfNeeded(&m)) {
return;
}
  // Check the existence of Texture and Sampler with
// [[vk::combinedImageSampler]] for the same descriptor set and binding.
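  // For example (illustrative HLSL; both declarations must share the same
  // set/binding pair):
  //   [[vk::combinedImageSampler]] [[vk::binding(0, 0)]] Texture2D gTex;
  //   [[vk::combinedImageSampler]] [[vk::binding(0, 0)]] SamplerState gSamp;
  // Omitting either the texture or the sampler for a used set/binding pair
  // triggers the fatal error below.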
auto resourceInfoForSampledImages =
spvContext.getResourceInfoForSampledImages();
auto dsetBindingWithoutTextureOrSampler =
getDSetBindingWithoutTextureOrSampler(resourceInfoForSampledImages);
if (dsetBindingWithoutTextureOrSampler.descriptor_set !=
std::numeric_limits<uint32_t>::max()) {
emitFatalError(
"Texture or Sampler with [[vk::combinedImageSampler]] attribute is "
"missing for descriptor set and binding: %0, %1",
{})
<< dsetBindingWithoutTextureOrSampler.descriptor_set
<< dsetBindingWithoutTextureOrSampler.binding;
return;
}
auto dsetbindingsToCombineImageSampler =
collectDSetBindingsToCombineSampledImage(resourceInfoForSampledImages);
// In order to flatten composite resources, we must also unroll loops.
// Therefore we should run legalization before optimization.
needsLegalization =
needsLegalization || declIdMapper.requiresLegalization() ||
spirvOptions.flattenResourceArrays || spirvOptions.reduceLoadSize ||
declIdMapper.requiresFlatteningCompositeResources() ||
!dsetbindingsToCombineImageSampler.empty() ||
spirvOptions.signaturePacking;
// Run legalization passes
if (spirvOptions.codeGenHighLevel) {
beforeHlslLegalization = needsLegalization;
} else {
if (needsLegalization) {
std::string messages;
if (!spirvToolsLegalize(&m, &messages,
&dsetbindingsToCombineImageSampler)) {
emitFatalError("failed to legalize SPIR-V: %0", {}) << messages;
emitNote("please file a bug report on "
"https://github.com/Microsoft/DirectXShaderCompiler/issues "
"with source code if possible",
{});
return;
} else if (!messages.empty()) {
emitWarning("SPIR-V legalization: %0", {}) << messages;
}
}
if (theCompilerInstance.getCodeGenOpts().OptimizationLevel > 0) {
// Run optimization passes
std::string messages;
if (!spirvToolsOptimize(&m, &messages)) {
emitFatalError("failed to optimize SPIR-V: %0", {}) << messages;
emitNote("please file a bug report on "
"https://github.com/Microsoft/DirectXShaderCompiler/issues "
"with source code if possible",
{});
return;
}
}
    // Fixup debug instruction opcodes: change the opcode to
    // OpExtInstWithForwardRefsKHR if the instruction has at least one forward
    // reference.
if (spirvOptions.debugInfoRich) {
std::string messages;
if (!spirvToolsFixupOpExtInst(&m, &messages)) {
emitFatalError("failed to fix OpExtInst opcodes: %0", {}) << messages;
emitNote("please file a bug report on "
"https://github.com/Microsoft/DirectXShaderCompiler/issues "
"with source code if possible",
{});
return;
} else if (!messages.empty()) {
emitWarning("SPIR-V fix-opextinst-opcodes: %0", {}) << messages;
}
}
// Trim unused capabilities.
// When optimizations are enabled, some optimization passes like DCE could
    // make some capabilities useless. To avoid duplicating logic between
    // this pass and DXC, DXC generates some capabilities unconditionally.
    // This means we should run this pass even when optimizations are
    // disabled.
{
std::string messages;
if (!spirvToolsTrimCapabilities(&m, &messages)) {
emitFatalError("failed to trim capabilities: %0", {}) << messages;
emitNote("please file a bug report on "
"https://github.com/Microsoft/DirectXShaderCompiler/issues "
"with source code if possible",
{});
return;
} else if (!messages.empty()) {
emitWarning("SPIR-V capability trimming: %0", {}) << messages;
}
}
}
// Validate the generated SPIR-V code
if (!spirvOptions.disableValidation) {
std::string messages;
if (!spirvToolsValidate(&m, &messages)) {
emitFatalError("generated SPIR-V is invalid: %0", {}) << messages;
emitNote("please file a bug report on "
"https://github.com/Microsoft/DirectXShaderCompiler/issues "
"with source code if possible",
{});
return;
}
}
theCompilerInstance.getOutStream()->write(
reinterpret_cast<const char *>(m.data()), m.size() * 4);
}
void SpirvEmitter::doDecl(const Decl *decl) {
if (isa<EmptyDecl>(decl) || isa<TypeAliasTemplateDecl>(decl) ||
isa<VarTemplateDecl>(decl))
return;
// Implicit decls are lazily created when needed.
if (decl->isImplicit()) {
return;
}
if (const auto *varDecl = dyn_cast<VarDecl>(decl)) {
doVarDecl(varDecl);
} else if (const auto *namespaceDecl = dyn_cast<NamespaceDecl>(decl)) {
for (auto *subDecl : namespaceDecl->decls())
// Note: We only emit functions as they are discovered through the call
// graph starting from the entry-point. We should not emit unused
// functions inside namespaces.
if (!isa<FunctionDecl>(subDecl))
doDecl(subDecl);
} else if (const auto *funcDecl = dyn_cast<FunctionDecl>(decl)) {
doFunctionDecl(funcDecl);
} else if (const auto *bufferDecl = dyn_cast<HLSLBufferDecl>(decl)) {
doHLSLBufferDecl(bufferDecl);
} else if (const auto *recordDecl = dyn_cast<RecordDecl>(decl)) {
doRecordDecl(recordDecl);
} else if (const auto *enumDecl = dyn_cast<EnumDecl>(decl)) {
doEnumDecl(enumDecl);
} else if (const auto *classTemplateDecl =
dyn_cast<ClassTemplateDecl>(decl)) {
doClassTemplateDecl(classTemplateDecl);
} else if (isa<TypedefNameDecl>(decl)) {
declIdMapper.recordsSpirvTypeAlias(decl);
} else if (isa<FunctionTemplateDecl>(decl)) {
// nothing to do.
} else if (isa<UsingDecl>(decl)) {
// nothing to do.
} else if (isa<UsingDirectiveDecl>(decl)) {
// nothing to do.
} else {
emitError("decl type %0 unimplemented", decl->getLocation())
<< decl->getDeclKindName();
}
}
RichDebugInfo *
SpirvEmitter::getOrCreateRichDebugInfo(const SourceLocation &loc) {
const StringRef file =
astContext.getSourceManager().getPresumedLoc(loc).getFilename();
auto &debugInfo = spvContext.getDebugInfo();
auto it = debugInfo.find(file);
if (it != debugInfo.end())
return &it->second;
auto *dbgSrc = spvBuilder.createDebugSource(file);
  // debugInfo.insert() inserts a {string key, RichDebugInfo} pair and
  // returns {iterator, bool /*inserted*/}, so
  // debugInfo.insert(...).first->second is the new RichDebugInfo.
return &debugInfo
.insert({file, RichDebugInfo(
dbgSrc, spvBuilder.createDebugCompilationUnit(
dbgSrc))})
.first->second;
}
void SpirvEmitter::doStmt(const Stmt *stmt,
llvm::ArrayRef<const Attr *> attrs) {
if (const auto *compoundStmt = dyn_cast<CompoundStmt>(stmt)) {
if (spirvOptions.debugInfoRich && stmt->getLocStart() != SourceLocation()) {
// Any opening of curly braces ('{') starts a CompoundStmt in the AST
// tree. It also means we have a new lexical block!
const auto loc = stmt->getLocStart();
const auto &sm = astContext.getSourceManager();
const uint32_t line = sm.getPresumedLineNumber(loc);
const uint32_t column = sm.getPresumedColumnNumber(loc);
RichDebugInfo *info = getOrCreateRichDebugInfo(loc);
auto *debugLexicalBlock = spvBuilder.createDebugLexicalBlock(
info->source, line, column, info->scopeStack.back());
// Add this lexical block to the stack of lexical scopes.
spvContext.pushDebugLexicalScope(info, debugLexicalBlock);
// Update or add DebugScope.
if (spvBuilder.getInsertPoint()->empty()) {
spvBuilder.getInsertPoint()->updateDebugScope(
new (spvContext) SpirvDebugScope(debugLexicalBlock));
} else if (!spvBuilder.isCurrentBasicBlockTerminated()) {
spvBuilder.createDebugScope(debugLexicalBlock);
}
// Iterate over sub-statements
for (auto *st : compoundStmt->body())
doStmt(st, {});
// We are done with processing this compound statement. Remove its lexical
// block from the stack of lexical scopes.
spvContext.popDebugLexicalScope(info);
if (!spvBuilder.isCurrentBasicBlockTerminated()) {
spvBuilder.createDebugScope(spvContext.getCurrentLexicalScope());
}
} else {
// Iterate over sub-statements
for (auto *st : compoundStmt->body())
doStmt(st);
}
} else if (const auto *retStmt = dyn_cast<ReturnStmt>(stmt)) {
doReturnStmt(retStmt);
} else if (const auto *declStmt = dyn_cast<DeclStmt>(stmt)) {
doDeclStmt(declStmt);
} else if (const auto *ifStmt = dyn_cast<IfStmt>(stmt)) {
doIfStmt(ifStmt, attrs);
} else if (const auto *switchStmt = dyn_cast<SwitchStmt>(stmt)) {
doSwitchStmt(switchStmt, attrs);
} else if (dyn_cast<CaseStmt>(stmt)) {
processCaseStmtOrDefaultStmt(stmt);
} else if (dyn_cast<DefaultStmt>(stmt)) {
processCaseStmtOrDefaultStmt(stmt);
} else if (const auto *breakStmt = dyn_cast<BreakStmt>(stmt)) {
doBreakStmt(breakStmt);
} else if (const auto *theDoStmt = dyn_cast<DoStmt>(stmt)) {
doDoStmt(theDoStmt, attrs);
} else if (const auto *discardStmt = dyn_cast<DiscardStmt>(stmt)) {
doDiscardStmt(discardStmt);
} else if (const auto *continueStmt = dyn_cast<ContinueStmt>(stmt)) {
doContinueStmt(continueStmt);
} else if (const auto *whileStmt = dyn_cast<WhileStmt>(stmt)) {
doWhileStmt(whileStmt, attrs);
} else if (const auto *forStmt = dyn_cast<ForStmt>(stmt)) {
doForStmt(forStmt, attrs);
} else if (dyn_cast<NullStmt>(stmt)) {
// For the null statement ";". We don't need to do anything.
} else if (const auto *attrStmt = dyn_cast<AttributedStmt>(stmt)) {
doStmt(attrStmt->getSubStmt(), attrStmt->getAttrs());
} else if (const auto *expr = dyn_cast<Expr>(stmt)) {
// All cases for expressions used as statements
SpirvInstruction *result = doExpr(expr);
if (result && result->getKind() == SpirvInstruction::IK_ExecutionMode &&
!attrs.empty()) {
// Handle [[vk::ext_capability(..)]] and [[vk::ext_extension(..)]]
// attributes for vk::ext_execution_mode[_id](..).
createSpirvIntrInstExt(
attrs, QualType(),
/*spvArgs*/ llvm::SmallVector<SpirvInstruction *, 1>{},
/*isInstr*/ false, expr->getExprLoc());
}
} else {
emitError("statement class '%0' unimplemented", stmt->getLocStart())
<< stmt->getStmtClassName() << stmt->getSourceRange();
}
}
SpirvInstruction *SpirvEmitter::doExpr(const Expr *expr,
SourceRange rangeOverride) {
SpirvInstruction *result = nullptr;
expr = expr->IgnoreParens();
SourceRange range =
(rangeOverride != SourceRange()) ? rangeOverride : expr->getSourceRange();
if (const auto *declRefExpr = dyn_cast<DeclRefExpr>(expr)) {
auto *decl = declRefExpr->getDecl();
if (isImplicitVarDeclInVkNamespace(declRefExpr->getDecl())) {
result = doExpr(cast<VarDecl>(decl)->getInit());
} else {
result = declIdMapper.getDeclEvalInfo(decl, expr->getLocStart(), range);
}
} else if (const auto *memberExpr = dyn_cast<MemberExpr>(expr)) {
result = doMemberExpr(memberExpr, range);
} else if (const auto *castExpr = dyn_cast<CastExpr>(expr)) {
result = doCastExpr(castExpr, range);
} else if (const auto *initListExpr = dyn_cast<InitListExpr>(expr)) {
result = doInitListExpr(initListExpr, range);
} else if (const auto *boolLiteral = dyn_cast<CXXBoolLiteralExpr>(expr)) {
result =
spvBuilder.getConstantBool(boolLiteral->getValue(), isSpecConstantMode);
result->setRValue();
} else if (const auto *intLiteral = dyn_cast<IntegerLiteral>(expr)) {
result = constEvaluator.translateAPInt(intLiteral->getValue(),
expr->getType(), isSpecConstantMode);
result->setRValue();
} else if (const auto *floatLiteral = dyn_cast<FloatingLiteral>(expr)) {
result = constEvaluator.translateAPFloat(
floatLiteral->getValue(), expr->getType(), isSpecConstantMode);
result->setRValue();
} else if (const auto *stringLiteral = dyn_cast<StringLiteral>(expr)) {
result = spvBuilder.getString(stringLiteral->getString());
} else if (const auto *compoundAssignOp =
dyn_cast<CompoundAssignOperator>(expr)) {
// CompoundAssignOperator is a subclass of BinaryOperator. It should be
// checked before BinaryOperator.
result = doCompoundAssignOperator(compoundAssignOp);
} else if (const auto *binOp = dyn_cast<BinaryOperator>(expr)) {
result = doBinaryOperator(binOp);
} else if (const auto *unaryOp = dyn_cast<UnaryOperator>(expr)) {
result = doUnaryOperator(unaryOp);
} else if (const auto *vecElemExpr = dyn_cast<HLSLVectorElementExpr>(expr)) {
result = doHLSLVectorElementExpr(vecElemExpr, range);
} else if (const auto *matElemExpr = dyn_cast<ExtMatrixElementExpr>(expr)) {
result = doExtMatrixElementExpr(matElemExpr);
} else if (const auto *funcCall = dyn_cast<CallExpr>(expr)) {
result = doCallExpr(funcCall, range);
} else if (const auto *subscriptExpr = dyn_cast<ArraySubscriptExpr>(expr)) {
result = doArraySubscriptExpr(subscriptExpr, range);
} else if (const auto *condExpr = dyn_cast<ConditionalOperator>(expr)) {
// Beginning with HLSL 2021, the ternary operator is short-circuited.
if (getCompilerInstance().getLangOpts().HLSLVersion >=
hlsl::LangStd::v2021) {
result = doShortCircuitedConditionalOperator(condExpr);
} else {
const Expr *cond = condExpr->getCond();
const Expr *falseExpr = condExpr->getFalseExpr();
const Expr *trueExpr = condExpr->getTrueExpr();
result = doConditional(condExpr, cond, falseExpr, trueExpr);
}
} else if (const auto *defaultArgExpr = dyn_cast<CXXDefaultArgExpr>(expr)) {
if (defaultArgExpr->getParam()->hasUninstantiatedDefaultArg()) {
auto defaultArg =
defaultArgExpr->getParam()->getUninstantiatedDefaultArg();
result = castToType(doExpr(defaultArg), defaultArg->getType(),
defaultArgExpr->getType(), defaultArg->getLocStart(),
defaultArg->getSourceRange());
result->setRValue();
} else {
result = doExpr(defaultArgExpr->getParam()->getDefaultArg());
}
} else if (isa<CXXThisExpr>(expr)) {
assert(curThis);
result = curThis;
} else if (isa<CXXConstructExpr>(expr)) {
    // For the RayQuery type, we should not explicitly initialize it using
    // CXXConstructExpr; e.g., 'RayQuery<0> r = RayQuery<0>()' behaves the same
    // as having no variable initializer at all. Setting nullptr for the SPIR-V
    // instruction used for expr will let us skip the variable initialization.
if (!hlsl::IsHLSLRayQueryType(expr->getType()))
result = curThis;
} else if (const auto *unaryExpr = dyn_cast<UnaryExprOrTypeTraitExpr>(expr)) {
result = doUnaryExprOrTypeTraitExpr(unaryExpr);
} else if (const auto *tmplParamExpr =
dyn_cast<SubstNonTypeTemplateParmExpr>(expr)) {
result = doExpr(tmplParamExpr->getReplacement());
} else {
emitError("expression class '%0' unimplemented", expr->getExprLoc())
<< expr->getStmtClassName() << expr->getSourceRange();
}
return result;
}
SpirvInstruction *SpirvEmitter::loadIfGLValue(const Expr *expr,
SourceRange rangeOverride) {
// We are trying to load the value here, which is what an LValueToRValue
  // implicit cast is intended to do. We can ignore the cast if it exists.
SourceRange range =
(rangeOverride != SourceRange()) ? rangeOverride : expr->getSourceRange();
expr = expr->IgnoreParenLValueCasts();
return loadIfGLValue(expr, doExpr(expr, range));
}
SpirvInstruction *SpirvEmitter::loadIfGLValue(const Expr *expr,
SpirvInstruction *info) {
const auto exprType = expr->getType();
// Do nothing if this is already rvalue
if (!info || info->isRValue())
return info;
// Check whether we are trying to load an array of opaque objects as a whole.
// If true, we are likely to copy it as a whole. To assist per-element
// copying, avoid the load here and return the pointer directly.
// TODO: consider moving this hack into SPIRV-Tools as a transformation.
if (isOpaqueArrayType(exprType))
return info;
// Check whether we are trying to load an externally visible structured/byte
// buffer as a whole. If true, it means we are creating alias for it. Avoid
// the load and write the pointer directly to the alias variable then.
//
// Also for the case of alias function returns. If we are trying to load an
// alias function return as a whole, it means we are assigning it to another
// alias variable. Avoid the load and write the pointer directly.
//
// Note: legalization specific code
if (isReferencingNonAliasStructuredOrByteBuffer(expr)) {
return info;
}
if (loadIfAliasVarRef(expr, &info)) {
// We are loading an alias variable as a whole here. This is likely for
// wholesale assignments or function returns. Need to load the pointer.
//
// Note: legalization specific code
return info;
}
  SpirvInstruction *loadedInstr = spvBuilder.createLoad(
      exprType, info, expr->getExprLoc(), expr->getSourceRange());
assert(loadedInstr);
  // Special-case: According to the SPIR-V spec, there is no physical size or
  // bit pattern defined for the boolean type. Therefore an unsigned integer
  // is used to represent booleans whenever a layout is required. In such
  // cases, after loading the uint, we should perform a comparison.
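  // For example, loading a 'bool b;' member of a cbuffer yields a uint value
  // here, which castToBool below turns back into a genuine boolean.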
{
uint32_t vecSize = 1, numRows = 0, numCols = 0;
if (info->getLayoutRule() != SpirvLayoutRule::Void &&
isBoolOrVecMatOfBoolType(exprType)) {
QualType uintType = astContext.UnsignedIntTy;
if (isScalarType(exprType) || isVectorType(exprType, nullptr, &vecSize)) {
const auto fromType =
vecSize == 1 ? uintType
: astContext.getExtVectorType(uintType, vecSize);
loadedInstr =
castToBool(loadedInstr, fromType, exprType, expr->getLocStart());
} else {
const bool isMat = isMxNMatrix(exprType, nullptr, &numRows, &numCols);
assert(isMat);
(void)isMat;
const clang::Type *type = exprType.getCanonicalType().getTypePtr();
const RecordType *RT = cast<RecordType>(type);
const ClassTemplateSpecializationDecl *templateSpecDecl =
cast<ClassTemplateSpecializationDecl>(RT->getDecl());
ClassTemplateDecl *templateDecl =
templateSpecDecl->getSpecializedTemplate();
const auto fromType = getHLSLMatrixType(
astContext, theCompilerInstance.getSema(), templateDecl,
astContext.UnsignedIntTy, numRows, numCols);
loadedInstr =
castToBool(loadedInstr, fromType, exprType, expr->getLocStart());
}
// Now that it is converted to Bool, it has no layout rule.
// This result-id should be evaluated as bool from here on out.
loadedInstr->setLayoutRule(SpirvLayoutRule::Void);
}
}
loadedInstr->setRValue();
return loadedInstr;
}
SpirvInstruction *SpirvEmitter::loadIfAliasVarRef(const Expr *expr,
SourceRange rangeOverride) {
const auto range =
(rangeOverride != SourceRange()) ? rangeOverride : expr->getSourceRange();
auto *instr = doExpr(expr, range);
loadIfAliasVarRef(expr, &instr, range);
return instr;
}
bool SpirvEmitter::loadIfAliasVarRef(const Expr *varExpr,
SpirvInstruction **instr,
SourceRange rangeOverride) {
assert(instr);
const auto range = (rangeOverride != SourceRange())
? rangeOverride
: varExpr->getSourceRange();
if ((*instr) && (*instr)->containsAliasComponent() &&
isAKindOfStructuredOrByteBuffer(varExpr->getType())) {
// Load the pointer of the aliased-to-variable if the expression has a
// pointer to pointer type.
if (varExpr->isGLValue()) {
*instr = spvBuilder.createLoad(varExpr->getType(), *instr,
varExpr->getExprLoc(), range);
}
return true;
}
return false;
}
SpirvInstruction *SpirvEmitter::castToType(SpirvInstruction *value,
QualType fromType, QualType toType,
SourceLocation srcLoc,
SourceRange range) {
uint32_t fromSize = 0;
uint32_t toSize = 0;
assert(isVectorType(fromType, nullptr, &fromSize) ==
isVectorType(toType, nullptr, &toSize) &&
fromSize == toSize);
// Avoid unused variable warning in release builds
(void)(fromSize);
(void)(toSize);
if (isFloatOrVecMatOfFloatType(toType))
return castToFloat(value, fromType, toType, srcLoc, range);
// Order matters here. Bool (vector) values will also be considered as uint
// (vector) values. So given a bool (vector) argument, isUintOrVecOfUintType()
// will also return true. We need to check bool before uint. The opposite is
// not true.
if (isBoolOrVecMatOfBoolType(toType))
return castToBool(value, fromType, toType, srcLoc, range);
if (isSintOrVecMatOfSintType(toType) || isUintOrVecMatOfUintType(toType))
return castToInt(value, fromType, toType, srcLoc, range);
emitError("casting to type %0 unimplemented", {}) << toType;
return nullptr;
}
void SpirvEmitter::doFunctionDecl(const FunctionDecl *decl) {
// Forward declaration of a function inside another.
if (!decl->isThisDeclarationADefinition()) {
addFunctionToWorkQueue(spvContext.getCurrentShaderModelKind(), decl,
/*isEntryFunction*/ false);
return;
}
// A RAII class for maintaining the current function under traversal.
class FnEnvRAII {
public:
// Creates a new instance which sets fnEnv to the newFn on creation,
// and resets fnEnv to its original value on destruction.
FnEnvRAII(const FunctionDecl **fnEnv, const FunctionDecl *newFn)
: oldFn(*fnEnv), fnSlot(fnEnv) {
*fnEnv = newFn;
}
~FnEnvRAII() { *fnSlot = oldFn; }
private:
const FunctionDecl *oldFn;
const FunctionDecl **fnSlot;
};
FnEnvRAII fnEnvRAII(&curFunction, decl);
// We are about to start translation for a new function. Clear the break stack
// and the continue stack.
breakStack = std::stack<SpirvBasicBlock *>();
continueStack = std::stack<SpirvBasicBlock *>();
// This will allow the entry-point name to be something like
// myNamespace::myEntrypointFunc.
std::string funcName = getFnName(decl);
std::string debugFuncName = funcName;
SpirvFunction *func = declIdMapper.getOrRegisterFn(decl);
auto loc = decl->getLocStart();
auto range = decl->getSourceRange();
RichDebugInfo *info = nullptr;
SpirvDebugFunction *debugFunction = nullptr;
SpirvDebugInstruction *outer_scope = spvContext.getCurrentLexicalScope();
const auto &sm = astContext.getSourceManager();
if (spirvOptions.debugInfoRich && decl->hasBody()) {
const uint32_t line = sm.getPresumedLineNumber(loc);
const uint32_t column = sm.getPresumedColumnNumber(loc);
info = getOrCreateRichDebugInfo(loc);
auto *source = info->source;
// Note that info->scopeStack.back() is a lexical scope of the function
// caller.
auto *parentScope = info->compilationUnit;
// TODO: figure out the proper flag based on the function decl.
// using FlagIsPublic for now.
uint32_t flags = 3u;
// The line number in the source program at which the function scope begins.
auto scopeLine = sm.getPresumedLineNumber(decl->getBody()->getLocStart());
debugFunction = spvBuilder.createDebugFunction(decl, debugFuncName, source,
line, column, parentScope,
"", flags, scopeLine, func);
func->setDebugScope(new (spvContext) SpirvDebugScope(debugFunction));
spvContext.pushDebugLexicalScope(info, debugFunction);
}
bool isEntry = false;
const auto iter = functionInfoMap.find(decl);
if (iter != functionInfoMap.end()) {
const auto &entryInfo = iter->second;
if (entryInfo->isEntryFunction) {
isEntry = true;
funcName = "src." + funcName;
// Create wrapper for the entry function
if (!emitEntryFunctionWrapper(decl, func))
return;
// Generate DebugEntryPoint if function definition
if (spirvOptions.debugInfoVulkan && debugFunction) {
auto *cu = dyn_cast<SpirvDebugCompilationUnit>(outer_scope);
assert(cu && "expected DebugCompilationUnit");
spvBuilder.createDebugEntryPoint(debugFunction, cu,
clang::getGitCommitHash(),
spirvOptions.clOptions);
}
}
}
const QualType retType =
declIdMapper.getTypeAndCreateCounterForPotentialAliasVar(decl);
spvBuilder.beginFunction(retType, decl->getLocStart(), funcName,
decl->hasAttr<HLSLPreciseAttr>(),
decl->hasAttr<NoInlineAttr>(), func);
bool isNonStaticMemberFn = false;
if (const auto *memberFn = dyn_cast<CXXMethodDecl>(decl)) {
if (!memberFn->isStatic()) {
// For non-static member function, the first parameter should be the
// object on which we are invoking this method.
QualType valueType = memberFn->getThisType(astContext)->getPointeeType();
// Remember the parameter for the 'this' object so later we can handle
// CXXThisExpr correctly.
curThis = spvBuilder.addFnParam(valueType, /*isPrecise*/ false,
/*isNoInterp*/ false, decl->getLocStart(),
"param.this");
if (isOrContainsAKindOfStructuredOrByteBuffer(valueType)) {
curThis->setContainsAliasComponent(true);
needsLegalization = true;
}
if (spirvOptions.debugInfoRich) {
// Add DebugLocalVariable information
const auto &sm = astContext.getSourceManager();
const uint32_t line = sm.getPresumedLineNumber(loc);
const uint32_t column = sm.getPresumedColumnNumber(loc);
if (!info)
info = getOrCreateRichDebugInfo(loc);
// TODO: replace this with FlagArtificial|FlagObjectPointer.
uint32_t flags = (1 << 5) | (1 << 8);
auto *debugLocalVar = spvBuilder.createDebugLocalVariable(
valueType, "this", info->source, line, column,
info->scopeStack.back(), flags, 1);
spvBuilder.createDebugDeclare(debugLocalVar, curThis, loc, range);
}
isNonStaticMemberFn = true;
}
}
// Create all parameters.
for (uint32_t i = 0; i < decl->getNumParams(); ++i) {
const ParmVarDecl *paramDecl = decl->getParamDecl(i);
(void)declIdMapper.createFnParam(paramDecl, i + 1 + isNonStaticMemberFn);
}
if (decl->hasBody()) {
// The entry basic block.
auto *entryLabel = spvBuilder.createBasicBlock("bb.entry");
spvBuilder.setInsertPoint(entryLabel);
// Add DebugFunctionDefinition if we are emitting
// NonSemantic.Shader.DebugInfo.100 debug info
if (spirvOptions.debugInfoVulkan && debugFunction)
spvBuilder.createDebugFunctionDef(debugFunction, func);
    // Process all statements in the body.
parentMap = std::make_unique<ParentMap>(decl->getBody());
doStmt(decl->getBody());
parentMap.reset(nullptr);
// We have processed all Stmts in this function and now in the last
// basic block. Make sure we have a termination instruction.
if (!spvBuilder.isCurrentBasicBlockTerminated()) {
const auto retType = decl->getReturnType();
const auto returnLoc = decl->getBody()->getLocEnd();
if (retType->isVoidType()) {
spvBuilder.createReturn(returnLoc);
} else {
// If the source code does not provide a proper return value for some
// control flow path, it's undefined behavior. We just return an
// undefined value here.
spvBuilder.createReturnValue(spvBuilder.getUndef(retType), returnLoc);
}
}
}
spvBuilder.endFunction();
if (spirvOptions.debugInfoRich) {
spvContext.popDebugLexicalScope(info);
}
}
bool SpirvEmitter::validateVKAttributes(const NamedDecl *decl) {
bool success = true;
if (decl->getAttr<VKInputAttachmentIndexAttr>()) {
if (!decl->isExternallyVisible()) {
emitError("SubpassInput(MS) must be externally visible",
decl->getLocation());
success = false;
}
// We only allow VKInputAttachmentIndexAttr to be attached to global
// variables. So it should be fine to cast here.
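    // e.g. (illustrative HLSL):
    //   [[vk::input_attachment_index(0)]] SubpassInput gInput;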
const auto elementType =
hlsl::GetHLSLResourceResultType(cast<VarDecl>(decl)->getType());
if (!isScalarType(elementType) && !isVectorType(elementType)) {
emitError(
"only scalar/vector types allowed as SubpassInput(MS) parameter type",
decl->getLocation());
// Return directly to avoid further type processing, which will hit
// asserts when lowering the type.
return false;
}
}
// The frontend will make sure that
// * vk::push_constant applies to global variables of struct type
// * vk::binding applies to global variables or cbuffers/tbuffers
// * vk::counter_binding applies to global variables of RW/Append/Consume
// StructuredBuffer
// * vk::location applies to function parameters/returns and struct fields
// So the only case we need to check co-existence is vk::push_constant and
// vk::binding.
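  // For example (illustrative HLSL; 'PushData' is a hypothetical struct), the
  // following is rejected because the two attributes are mutually exclusive:
  //   [[vk::push_constant]] [[vk::binding(0, 0)]] PushData gPushData;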
if (const auto *pcAttr = decl->getAttr<VKPushConstantAttr>()) {
const auto loc = pcAttr->getLocation();
if (seenPushConstantAt.isInvalid()) {
seenPushConstantAt = loc;
} else {
// TODO: Actually this is slightly incorrect. The Vulkan spec says:
// There must be no more than one push constant block statically used
// per shader entry point.
// But we are checking whether there are more than one push constant
// blocks defined. Tracking usage requires more work.
emitError("cannot have more than one push constant block", loc);
emitNote("push constant block previously defined here",
seenPushConstantAt);
success = false;
}
if (decl->hasAttr<VKBindingAttr>()) {
emitError("vk::push_constant attribute cannot be used together with "
"vk::binding attribute",
loc);
success = false;
}
}
// vk::shader_record_nv is supported only on cbuffer/ConstantBuffer
if (const auto *srbAttr = decl->getAttr<VKShaderRecordNVAttr>()) {
const auto loc = srbAttr->getLocation();
const HLSLBufferDecl *bufDecl = nullptr;
bool isValidType = false;
if ((bufDecl = dyn_cast<HLSLBufferDecl>(decl)))
isValidType = bufDecl->isCBuffer();
else if ((bufDecl = dyn_cast<HLSLBufferDecl>(decl->getDeclContext())))
isValidType = bufDecl->isCBuffer();
else if (isa<VarDecl>(decl))
isValidType = isConstantBuffer(dyn_cast<VarDecl>(decl)->getType());
if (!isValidType) {
emitError(
"vk::shader_record_nv can be applied only to cbuffer/ConstantBuffer",
loc);
success = false;
}
if (decl->hasAttr<VKBindingAttr>()) {
emitError("vk::shader_record_nv attribute cannot be used together with "
"vk::binding attribute",
loc);
success = false;
}
}
// vk::shader_record_ext is supported only on cbuffer/ConstantBuffer
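  // e.g. (illustrative HLSL; 'ShaderRecord' is a hypothetical struct):
  //   [[vk::shader_record_ext]] ConstantBuffer<ShaderRecord> gSbt;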
if (const auto *srbAttr = decl->getAttr<VKShaderRecordEXTAttr>()) {
const auto loc = srbAttr->getLocation();
const HLSLBufferDecl *bufDecl = nullptr;
bool isValidType = false;
if ((bufDecl = dyn_cast<HLSLBufferDecl>(decl)))
isValidType = bufDecl->isCBuffer();
else if ((bufDecl = dyn_cast<HLSLBufferDecl>(decl->getDeclContext())))
isValidType = bufDecl->isCBuffer();
else if (isa<VarDecl>(decl))
isValidType = isConstantBuffer(dyn_cast<VarDecl>(decl)->getType());
if (!isValidType) {
emitError(
"vk::shader_record_ext can be applied only to cbuffer/ConstantBuffer",
loc);
success = false;
}
if (decl->hasAttr<VKBindingAttr>()) {
emitError("vk::shader_record_ext attribute cannot be used together with "
"vk::binding attribute",
loc);
success = false;
}
}
// a VarDecl should have only one of vk::ext_builtin_input or
// vk::ext_builtin_output
if (decl->hasAttr<VKExtBuiltinInputAttr>() &&
decl->hasAttr<VKExtBuiltinOutputAttr>()) {
emitError("vk::ext_builtin_input cannot be used together with "
"vk::ext_builtin_output",
decl->getAttr<VKExtBuiltinOutputAttr>()->getLocation());
success = false;
}
// vk::ext_builtin_input and vk::ext_builtin_output must only be used for a
// static variable. We only allow them to be attached to variables, so it
// should be fine to cast here.
if ((decl->hasAttr<VKExtBuiltinInputAttr>() ||
decl->hasAttr<VKExtBuiltinOutputAttr>()) &&
cast<VarDecl>(decl)->getStorageClass() != StorageClass::SC_Static) {
emitError("vk::ext_builtin_input and vk::ext_builtin_output can only be "
"applied to a static variable",
decl->getLocation());
success = false;
}
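  // A valid declaration looks like (illustrative HLSL; the builtin number
  // assumes SPIR-V BuiltIn NumWorkgroups = 24):
  //   [[vk::ext_builtin_input(/* NumWorkgroups */ 24)]]
  //   static const uint3 gNumWorkgroups;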
  // vk::ext_builtin_input must only be used on a const-qualified variable.
  // We only allow it to be attached to variables, so it should be fine to
  // cast here.
if (decl->hasAttr<VKExtBuiltinInputAttr>() &&
!cast<VarDecl>(decl)->getType().isConstQualified()) {
emitError("vk::ext_builtin_input can only be applied to a const-qualified "
"variable",
decl->getLocation());
success = false;
}
return success;
}
void SpirvEmitter::registerCapabilitiesAndExtensionsForVarDecl(
const VarDecl *varDecl) {
// First record any extensions that are part of the actual variable
// declaration.
for (auto *attribute : varDecl->specific_attrs<VKExtensionExtAttr>()) {
clang::StringRef extensionName = attribute->getName();
spvBuilder.requireExtension(extensionName, varDecl->getLocation());
}
for (auto *attribute : varDecl->specific_attrs<VKCapabilityExtAttr>()) {
spv::Capability cap = spv::Capability(attribute->getCapability());
spvBuilder.requireCapability(cap, varDecl->getLocation());
}
// Now check for any capabilities or extensions that are part of the type.
const TypedefType *type = dyn_cast<TypedefType>(varDecl->getType());
if (!type)
return;
declIdMapper.registerCapabilitiesAndExtensionsForType(type);
}
void SpirvEmitter::doHLSLBufferDecl(const HLSLBufferDecl *bufferDecl) {
// This is a cbuffer/tbuffer decl.
  // Check and emit warnings for member initializers, which are not
  // supported in Vulkan.
for (const auto *member : bufferDecl->decls()) {
if (const auto *varMember = dyn_cast<VarDecl>(member)) {
if (!spirvOptions.noWarnIgnoredFeatures) {
if (const auto *init = varMember->getInit())
emitWarning("%select{tbuffer|cbuffer}0 member initializer "
"ignored since no Vulkan equivalent",
init->getExprLoc())
<< bufferDecl->isCBuffer() << init->getSourceRange();
}
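      // For example (illustrative HLSL), the '= 1.0f' below is dropped:
      //   cbuffer MyCB { float gScale = 1.0f; };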
      // We cannot yet handle external initialization of column-major matrices.
if (isOrContainsNonFpColMajorMatrix(astContext, spirvOptions,
varMember->getType(), varMember)) {
emitError("externally initialized non-floating-point column-major "
"matrices not supported yet",
varMember->getLocation());
}
}
}
if (!validateVKAttributes(bufferDecl))
return;
if (bufferDecl->hasAttr<VKShaderRecordNVAttr>()) {
(void)declIdMapper.createShaderRecordBuffer(
bufferDecl, DeclResultIdMapper::ContextUsageKind::ShaderRecordBufferNV);
} else if (bufferDecl->hasAttr<VKShaderRecordEXTAttr>()) {
(void)declIdMapper.createShaderRecordBuffer(
bufferDecl,
DeclResultIdMapper::ContextUsageKind::ShaderRecordBufferKHR);
} else {
(void)declIdMapper.createCTBuffer(bufferDecl);
}
}
void SpirvEmitter::doClassTemplateDecl(
const ClassTemplateDecl *classTemplateDecl) {
for (auto classTemplateSpecializationDeclItr :
classTemplateDecl->specializations()) {
if (const CXXRecordDecl *recordDecl =
dyn_cast<CXXRecordDecl>(&*classTemplateSpecializationDeclItr)) {
doRecordDecl(recordDecl);
}
}
}
void SpirvEmitter::doRecordDecl(const RecordDecl *recordDecl) {
  // Ignore implicit records.
  // Somehow we end up with implicit records containing:
  //   static const int Length = count;
  // which can mess up the normal CodeGen.
if (recordDecl->isImplicit())
return;
// Handle each static member with inline initializer.
// Each static member has a corresponding VarDecl inside the
// RecordDecl. For those defined in the translation unit,
  // their VarDecls do not have initializers.
for (auto *subDecl : recordDecl->decls()) {
if (auto *varDecl = dyn_cast<VarDecl>(subDecl)) {
if (varDecl->isStaticDataMember() && varDecl->hasInit())
doVarDecl(varDecl);
} else if (auto *enumDecl = dyn_cast<EnumDecl>(subDecl)) {
doEnumDecl(enumDecl);
} else if (auto recordDecl = dyn_cast<RecordDecl>(subDecl)) {
doRecordDecl(recordDecl);
}
}
}
void SpirvEmitter::doEnumDecl(const EnumDecl *decl) {
for (auto it = decl->enumerator_begin(); it != decl->enumerator_end(); ++it)
declIdMapper.createEnumConstant(*it);
}
void SpirvEmitter::doVarDecl(const VarDecl *decl) {
if (!validateVKAttributes(decl))
return;
const auto loc = decl->getLocation();
const auto range = decl->getSourceRange();
if (isExtResultIdType(decl->getType())) {
declIdMapper.createResultId(decl);
return;
}
// HLSL has the 'string' type which can be used for rare purposes such as
// printf (SPIR-V's DebugPrintf). SPIR-V does not have a 'char' or 'string'
// type, and therefore any variable of such type should not be created.
// DeclResultIdMapper maps such decl to an OpString instruction that
// represents the variable's initializer literal.
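  // e.g. (illustrative HLSL):
  //   string msg = "value: %d";
  //   printf(msg, x);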
if (isStringType(decl->getType())) {
declIdMapper.createOrUpdateStringVar(decl);
return;
}
  // We cannot yet handle external initialization of column-major matrices.
if (isExternalVar(decl) &&
isOrContainsNonFpColMajorMatrix(astContext, spirvOptions, decl->getType(),
decl)) {
emitError("externally initialized non-floating-point column-major "
"matrices not supported yet",
loc);
}
  // Reject arrays of RW/append/consume structured buffers. They have
  // associated counters, which are quite nasty to handle.
if (decl->getType()->isArrayType() &&
isRWAppendConsumeSBuffer(decl->getType())) {
if (!spirvOptions.allowRWStructuredBufferArrays) {
emitError("arrays of RW/append/consume structured buffers unsupported",
loc);
return;
} else if (decl->getType()
->getAsArrayTypeUnsafe()
->getElementType()
->isArrayType()) {
// See
// https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#interfaces-resources-setandbinding
emitError("Multi-dimensional arrays of RW/append/consume structured "
"buffers are unsupported in Vulkan",
loc);
return;
}
}
if (decl->hasAttr<VKConstantIdAttr>()) {
// This is a VarDecl for specialization constant.
createSpecConstant(decl);
return;
}
if (decl->hasAttr<VKPushConstantAttr>()) {
// This is a VarDecl for PushConstant block.
(void)declIdMapper.createPushConstant(decl);
return;
}
if (decl->hasAttr<VKShaderRecordNVAttr>()) {
(void)declIdMapper.createShaderRecordBuffer(
decl, DeclResultIdMapper::ContextUsageKind::ShaderRecordBufferNV);
return;
}
if (decl->hasAttr<VKShaderRecordEXTAttr>()) {
(void)declIdMapper.createShaderRecordBuffer(
decl, DeclResultIdMapper::ContextUsageKind::ShaderRecordBufferKHR);
return;
}
registerCapabilitiesAndExtensionsForVarDecl(decl);
  // Handle vk::ext_builtin_input and vk::ext_builtin_output by using
  // getBuiltinVar to create the builtin and validate the storage class.
if (decl->hasAttr<VKExtBuiltinInputAttr>()) {
auto *builtinAttr = decl->getAttr<VKExtBuiltinInputAttr>();
int builtinId = builtinAttr->getBuiltInID();
SpirvVariable *builtinVar =
declIdMapper.getBuiltinVar(spv::BuiltIn(builtinId), decl->getType(),
spv::StorageClass::Input, loc);
if (builtinVar->getStorageClass() != spv::StorageClass::Input) {
emitError("cannot redefine builtin %0 as an input",
builtinAttr->getLocation())
<< builtinId;
emitWarning("previous definition is here",
builtinVar->getSourceLocation());
}
return;
} else if (decl->hasAttr<VKExtBuiltinOutputAttr>()) {
auto *builtinAttr = decl->getAttr<VKExtBuiltinOutputAttr>();
int builtinId = builtinAttr->getBuiltInID();
SpirvVariable *builtinVar =
declIdMapper.getBuiltinVar(spv::BuiltIn(builtinId), decl->getType(),
spv::StorageClass::Output, loc);
if (builtinVar->getStorageClass() != spv::StorageClass::Output) {
emitError("cannot redefine builtin %0 as an output",
builtinAttr->getLocation())
<< builtinId;
emitWarning("previous definition is here",
builtinVar->getSourceLocation());
}
return;
}
// We can have VarDecls inside cbuffer/tbuffer. For those VarDecls, we need
// to emit their cbuffer/tbuffer as a whole and access each individual one
// using access chains.
  // cbuffers and tbuffers are HLSLBufferDecls;
  // ConstantBuffers and TextureBuffers are not.
if (const auto *bufferDecl =
dyn_cast<HLSLBufferDecl>(decl->getDeclContext())) {
// This is a VarDecl of cbuffer/tbuffer type.
doHLSLBufferDecl(bufferDecl);
return;
}
if (decl->getAttr<VKInputAttachmentIndexAttr>()) {
if (!spvContext.isPS()) {
      // SubpassInput(MS) variables are only allowed in pixel shaders. In this
      // case, we avoid creating the declaration because it should not be used.
return;
}
}
SpirvVariable *var = nullptr;
// The contents in externally visible variables can be updated via the
// pipeline. They should be handled differently from file and function scope
// variables.
  // File scope variables (static "global" and "local" variables) belong to
  // the Private storage class, while function scope variables (normal "local"
  // variables) belong to the Function storage class.
if (isExternalVar(decl)) {
var = declIdMapper.createExternVar(decl);
} else {
// We already know the variable is not externally visible here. If it does
// not have local storage, it should be file scope variable.
const bool isFileScopeVar = !decl->hasLocalStorage();
if (isFileScopeVar)
var = declIdMapper.createFileVar(decl, llvm::None);
else
var = declIdMapper.createFnVar(decl, llvm::None);
// Emit OpStore to initialize the variable
    // TODO: revert to using OpVariable initializers
// We should only evaluate the initializer once for a static variable.
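    // For example (illustrative HLSL):
    //   static int gCounter = 0;           // global: deferred to entry wrapper
    //   void f() { static int n = 0; ... } // static local: guarded via initOnce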
if (isFileScopeVar) {
if (decl->isStaticLocal()) {
initOnce(decl->getType(), decl->getName(), var, decl->getInit());
} else {
// Defer to initialize these global variables at the beginning of the
// entry function.
toInitGloalVars.push_back(decl);
}
}
// Function local variables. Just emit OpStore at the current insert point.
else if (const Expr *init = decl->getInit()) {
if (auto *constInit =
constEvaluator.tryToEvaluateAsConst(init, isSpecConstantMode)) {
spvBuilder.createStore(var, constInit, loc, range);
} else {
storeValue(var, loadIfGLValue(init), decl->getType(), loc, range);
}
// Update counter variable associated with local variables
tryToAssignCounterVar(decl, init);
}
if (!isFileScopeVar && spirvOptions.debugInfoRich) {
// Add DebugLocalVariable information
const auto &sm = astContext.getSourceManager();
const uint32_t line = sm.getPresumedLineNumber(loc);
const uint32_t column = sm.getPresumedColumnNumber(loc);
const auto *info = getOrCreateRichDebugInfo(loc);
// TODO: replace this with FlagIsLocal enum.
uint32_t flags = 1 << 2;
auto *debugLocalVar = spvBuilder.createDebugLocalVariable(
decl->getType(), decl->getName(), info->source, line, column,
info->scopeStack.back(), flags);
spvBuilder.createDebugDeclare(debugLocalVar, var, loc, range);
}
// Variables that are not externally visible and of opaque types should
// request legalization.
if (!needsLegalization && isOpaqueType(decl->getType()))
needsLegalization = true;
}
if (var != nullptr && decl->hasAttrs()) {
declIdMapper.decorateWithIntrinsicAttrs(decl, var);
if (auto attr = decl->getAttr<VKStorageClassExtAttr>()) {
var->setStorageClass(static_cast<spv::StorageClass>(attr->getStclass()));
}
}
// All variables that are of opaque struct types should request legalization.
if (!needsLegalization && isOpaqueStructType(decl->getType()))
needsLegalization = true;
}
spv::LoopControlMask SpirvEmitter::translateLoopAttribute(const Stmt *stmt,
const Attr &attr) {
switch (attr.getKind()) {
case attr::HLSLLoop:
case attr::HLSLFastOpt:
return spv::LoopControlMask::DontUnroll;
case attr::HLSLUnroll:
return spv::LoopControlMask::Unroll;
case attr::HLSLAllowUAVCondition:
if (!spirvOptions.noWarnIgnoredFeatures) {
emitWarning("unsupported allow_uav_condition attribute ignored",
stmt->getLocStart());
}
break;
default:
llvm_unreachable("found unknown loop attribute");
}
return spv::LoopControlMask::MaskNone;
}
void SpirvEmitter::doDiscardStmt(const DiscardStmt *discardStmt) {
assert(!spvBuilder.isCurrentBasicBlockTerminated());
// The discard statement can only be called from a pixel shader
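  // e.g. (illustrative HLSL):
  //   if (color.a < 0.5) discard;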
if (!spvContext.isPS()) {
emitError("discard statement may only be used in pixel shaders",
discardStmt->getLoc());
return;
}
if (featureManager.isExtensionEnabled(
Extension::EXT_demote_to_helper_invocation) ||
featureManager.isTargetEnvVulkan1p3OrAbove()) {
// OpDemoteToHelperInvocation(EXT) provided by SPIR-V 1.6 or
// SPV_EXT_demote_to_helper_invocation SPIR-V extension allow shaders to
// "demote" a fragment shader invocation to behave like a helper invocation
// for its duration. The demoted invocation will have no further side
// effects and will not output to the framebuffer, but remains active and
// can participate in computing derivatives and in subgroup operations. This
// is a better match for the "discard" instruction in HLSL.
spvBuilder.createDemoteToHelperInvocation(discardStmt->getLoc());
} else {
// Note: if/when the demote behavior becomes part of the core Vulkan spec,
// we should no longer generate OpKill for 'discard', and always generate
// the demote behavior.
spvBuilder.createKill(discardStmt->getLoc());
    // Some statements that alter the control flow (break, continue, return,
    // and discard) require creation of a new basic block to hold any
    // statement that may follow them.
auto *newBB = spvBuilder.createBasicBlock();
spvBuilder.setInsertPoint(newBB);
}
}
void SpirvEmitter::doDoStmt(const DoStmt *theDoStmt,
llvm::ArrayRef<const Attr *> attrs) {
// do-while loops are composed of:
//
// do {
// <body>
// } while(<check>);
//
// SPIR-V requires loops to have a merge basic block as well as a continue
// basic block. Even though do-while loops do not have an explicit continue
// block as in for-loops, we still do need to create a continue block.
//
// Since SPIR-V requires structured control flow, we need two more basic
// blocks, <header> and <merge>. <header> is the block before control flow
// diverges, and <merge> is the block where control flow subsequently
// converges. The <check> can be performed in the <continue> basic block.
// The final CFG should normally be like the following. Exceptions
// will occur with non-local exits like loop breaks or early returns.
//
// +----------+
// | header | <-----------------------------------+
// +----------+ |
// | | (true)
// v |
// +------+ +--------------------+ |
// | body | ----> | continue (<check>) |-----------+
// +------+ +--------------------+
// |
// | (false)
// +-------+ |
// | merge | <-------------+
// +-------+
//
// For more details, see "2.11. Structured Control Flow" in the SPIR-V spec.
const spv::LoopControlMask loopControl =
attrs.empty() ? spv::LoopControlMask::MaskNone
: translateLoopAttribute(theDoStmt, *attrs.front());
// Create basic blocks
auto *headerBB = spvBuilder.createBasicBlock("do_while.header");
auto *bodyBB = spvBuilder.createBasicBlock("do_while.body");
auto *continueBB = spvBuilder.createBasicBlock("do_while.continue");
auto *mergeBB = spvBuilder.createBasicBlock("do_while.merge");
// Make sure any continue statements branch to the continue block, and any
// break statements branch to the merge block.
continueStack.push(continueBB);
breakStack.push(mergeBB);
// Branch from the current insert point to the header block.
spvBuilder.createBranch(headerBB, theDoStmt->getLocStart());
spvBuilder.addSuccessor(headerBB);
// Process the <header> block
// The header block must always branch to the body.
spvBuilder.setInsertPoint(headerBB);
const Stmt *body = theDoStmt->getBody();
spvBuilder.createBranch(bodyBB,
body ? body->getLocStart() : theDoStmt->getLocStart(),
mergeBB, continueBB, loopControl);
spvBuilder.addSuccessor(bodyBB);
// The current basic block has OpLoopMerge instruction. We need to set its
// continue and merge target.
spvBuilder.setContinueTarget(continueBB);
spvBuilder.setMergeTarget(mergeBB);
// Process the <body> block
spvBuilder.setInsertPoint(bodyBB);
if (body) {
doStmt(body);
}
if (!spvBuilder.isCurrentBasicBlockTerminated()) {
spvBuilder.createBranch(continueBB, body ? body->getLocEnd()
: theDoStmt->getLocStart());
}
spvBuilder.addSuccessor(continueBB);
// Process the <continue> block. The check for whether the loop should
// continue lies in the continue block.
// *NOTE*: There's a SPIR-V rule that when a conditional branch is to occur in
// a continue block of a loop, there should be no OpSelectionMerge. Only an
// OpBranchConditional must be specified.
spvBuilder.setInsertPoint(continueBB);
SpirvInstruction *condition = nullptr;
const auto check = theDoStmt->getCond();
if (check) {
condition = doExpr(check);
} else {
condition = spvBuilder.getConstantBool(true);
}
spvBuilder.createConditionalBranch(
condition, headerBB, mergeBB, theDoStmt->getLocEnd(), nullptr, nullptr,
spv::SelectionControlMask::MaskNone, spv::LoopControlMask::MaskNone,
check ? check->getSourceRange()
: SourceRange(theDoStmt->getWhileLoc(), theDoStmt->getLocEnd()));
spvBuilder.addSuccessor(headerBB);
spvBuilder.addSuccessor(mergeBB);
// Set insertion point to the <merge> block for subsequent statements
spvBuilder.setInsertPoint(mergeBB);
// Done with the current scope's continue block and merge block.
continueStack.pop();
breakStack.pop();
}
void SpirvEmitter::doContinueStmt(const ContinueStmt *continueStmt) {
assert(!spvBuilder.isCurrentBasicBlockTerminated());
auto *continueTargetBB = continueStack.top();
spvBuilder.createBranch(continueTargetBB, continueStmt->getLocStart());
spvBuilder.addSuccessor(continueTargetBB);
  // Some statements that alter the control flow (break, continue, return, and
  // discard) require creation of a new basic block to hold any statement that
  // may follow them. For example: StmtB and StmtC below are put inside a new
// basic block which is unreachable.
//
// while (true) {
// StmtA;
// continue;
// StmtB;
// StmtC;
// }
auto *newBB = spvBuilder.createBasicBlock();
spvBuilder.setInsertPoint(newBB);
}
void SpirvEmitter::doWhileStmt(const WhileStmt *whileStmt,
llvm::ArrayRef<const Attr *> attrs) {
// While loops are composed of:
// while (<check>) { <body> }
//
// SPIR-V requires loops to have a merge basic block as well as a continue
// basic block. Even though while loops do not have an explicit continue
// block as in for-loops, we still do need to create a continue block.
//
// Since SPIR-V requires structured control flow, we need two more basic
// blocks, <header> and <merge>. <header> is the block before control flow
// diverges, and <merge> is the block where control flow subsequently
// converges. The <check> block can take the responsibility of the <header>
// block. The final CFG should normally be like the following. Exceptions
// will occur with non-local exits like loop breaks or early returns.
//
// +----------+
// | header | <------------------+
// | (check) | |
// +----------+ |
// | |
// +-------+-------+ |
// | false | true |
// | v |
// | +------+ +------------------+
// | | body | --> | continue (no-op) |
// v +------+ +------------------+
// +-------+
// | merge |
// +-------+
//
// The only exception is when the condition cannot be expressed in a single
// block. Specifically, short-circuited operators end up producing multiple
// blocks. In that case, we cannot treat the <check> block as the header
// block, and must instead have a bespoke <header> block. The condition is
// then moved into the loop. For example, given a loop in the form
// while (a && b) { <body> }
// we will generate instructions for the equivalent loop
// while (true) { if (!(a && b)) { break } <body> }
// +----------+
// | header | <------------------+
// +----------+ |
// | |
// v |
// +----------+ |
// | check | |
// +----------+ |
// | |
// +-------+-------+ |
// | false | true |
// | v |
// | +------+ +------------------+
// | | body | --> | continue (no-op) |
// v +------+ +------------------+
// +-------+
// | merge |
// +-------+
// The reason we don't unconditionally apply this transformation, which is
// technically always legal, is because it prevents loop unrolling in SPIR-V
// Tools, which does not support unrolling loops with early breaks.
// For more details, see "2.11. Structured Control Flow" in the SPIR-V spec.
const spv::LoopControlMask loopControl =
attrs.empty() ? spv::LoopControlMask::MaskNone
: translateLoopAttribute(whileStmt, *attrs.front());
const Expr *check = whileStmt->getCond();
const Stmt *body = whileStmt->getBody();
bool checkHasShortcircuitedOp = stmtTreeContainsShortCircuitedOp(check);
// Create basic blocks
auto *checkBB = spvBuilder.createBasicBlock("while.check");
auto *headerBB = checkHasShortcircuitedOp
? spvBuilder.createBasicBlock("while.header")
: checkBB;
auto *bodyBB = spvBuilder.createBasicBlock("while.body");
auto *continueBB = spvBuilder.createBasicBlock("while.continue");
auto *mergeBB = spvBuilder.createBasicBlock("while.merge");
// Make sure any continue statements branch to the continue block, and any
// break statements branch to the merge block.
continueStack.push(continueBB);
breakStack.push(mergeBB);
spvBuilder.createBranch(headerBB, whileStmt->getLocStart());
spvBuilder.addSuccessor(headerBB);
spvBuilder.setInsertPoint(headerBB);
if (checkHasShortcircuitedOp) {
// Process the <header> block.
spvBuilder.setInsertPoint(headerBB);
spvBuilder.createBranch(
checkBB,
check ? check->getLocStart()
: (body ? body->getLocStart() : whileStmt->getLocStart()),
mergeBB, continueBB, loopControl,
check
? check->getSourceRange()
: SourceRange(whileStmt->getLocStart(), whileStmt->getLocStart()));
spvBuilder.addSuccessor(checkBB);
// The current basic block has a OpLoopMerge instruction. We need to set
// its continue and merge target.
spvBuilder.setContinueTarget(continueBB);
spvBuilder.setMergeTarget(mergeBB);
// Process the <check> block.
spvBuilder.setInsertPoint(checkBB);
// If we have:
// while (int a = foo()) {...}
// we should evaluate 'a' by calling 'foo()' every single time the check has
// to occur.
if (const auto *condVarDecl = whileStmt->getConditionVariableDeclStmt())
doStmt(condVarDecl);
SpirvInstruction *condition = doExpr(check);
spvBuilder.createConditionalBranch(
condition, bodyBB, mergeBB,
check ? check->getLocEnd()
: (body ? body->getLocStart() : whileStmt->getLocStart()),
nullptr, nullptr, spv::SelectionControlMask::MaskNone,
spv::LoopControlMask::MaskNone,
check
? check->getSourceRange()
: SourceRange(whileStmt->getLocStart(), whileStmt->getLocStart()));
spvBuilder.addSuccessor(bodyBB);
spvBuilder.addSuccessor(mergeBB);
} else {
// In the case of simple or empty conditions, we can use a
// single block for <check> and <header>.
// If we have:
// while (int a = foo()) {...}
// we should evaluate 'a' by calling 'foo()' every single time the check has
// to occur.
if (const auto *condVarDecl = whileStmt->getConditionVariableDeclStmt())
doStmt(condVarDecl);
SpirvInstruction *condition = nullptr;
if (check) {
condition = doExpr(check);
} else {
condition = spvBuilder.getConstantBool(true);
}
spvBuilder.createConditionalBranch(
condition, bodyBB, mergeBB, whileStmt->getLocStart(), mergeBB,
continueBB, spv::SelectionControlMask::MaskNone, loopControl,
check ? check->getSourceRange()
: SourceRange(whileStmt->getWhileLoc(), whileStmt->getLocEnd()));
spvBuilder.addSuccessor(bodyBB);
spvBuilder.addSuccessor(mergeBB);
// The current basic block has OpLoopMerge instruction. We need to set its
// continue and merge target.
spvBuilder.setContinueTarget(continueBB);
spvBuilder.setMergeTarget(mergeBB);
}
// Process the <body> block.
spvBuilder.setInsertPoint(bodyBB);
if (body) {
doStmt(body);
}
if (!spvBuilder.isCurrentBasicBlockTerminated())
spvBuilder.createBranch(continueBB, whileStmt->getLocEnd());
spvBuilder.addSuccessor(continueBB);
// Process the <continue> block. While loops do not have an explicit
// continue block. The continue block just branches to the <header> block.
spvBuilder.setInsertPoint(continueBB);
spvBuilder.createBranch(headerBB, whileStmt->getLocEnd());
spvBuilder.addSuccessor(headerBB);
// Set insertion point to the <merge> block for subsequent statements.
spvBuilder.setInsertPoint(mergeBB);
// Done with the current scope's continue and merge blocks.
continueStack.pop();
breakStack.pop();
}
void SpirvEmitter::doForStmt(const ForStmt *forStmt,
llvm::ArrayRef<const Attr *> attrs) {
// for loops are composed of:
// for (<init>; <check>; <continue>) <body>
//
// To translate a for loop, we'll need to emit all <init> statements
// in the current basic block, and then have separate basic blocks for
// <check>, <continue>, and <body>. Besides, since SPIR-V requires
// structured control flow, we need two more basic blocks, <header>
// and <merge>. <header> is the block before control flow diverges,
// while <merge> is the block where control flow subsequently converges.
// The <check> block can take the responsibility of the <header> block.
// The final CFG should normally be like the following. Exceptions will
// occur with non-local exits like loop breaks or early returns.
// +--------+
// | init |
// +--------+
// |
// v
// +----------+
// | header | <---------------+
// | (check) | |
// +----------+ |
// | |
// +-------+-------+ |
// | false | true |
// | v |
// | +------+ +----------+
// | | body | --> | continue |
// v +------+ +----------+
// +-------+
// | merge |
// +-------+
//
// The only exception is when the condition cannot be expressed in a single
// block. Specifically, short-circuited operators end up producing multiple
// blocks. In that case, we cannot treat the <check> block as the header
// block, and must instead have a bespoke <header> block. The condition is
// then moved into the loop. For example, given a loop in the form
// for (<init>; a && b; <continue>) { <body> }
// we will generate instructions for the equivalent loop
// for (<init>; ; <continue>) { if (!(a && b)) { break } <body> }
// +--------+
// | init |
// +--------+
// |
// v
// +----------+
// | header | <---------------+
// +----------+ |
// | |
// v |
// +----------+ |
// | check | |
// +----------+ |
// | |
// +-------+-------+ |
// | false | true |
// | v |
// | +------+ +----------+
// | | body | --> | continue |
// v +------+ +----------+
// +-------+
// | merge |
// +-------+
  // The reason we don't unconditionally apply this transformation, even
  // though it is technically always legal, is that it prevents loop unrolling
  // in SPIR-V Tools, which does not support unrolling loops with early breaks.
// For more details, see "2.11. Structured Control Flow" in the SPIR-V spec.
const spv::LoopControlMask loopControl =
attrs.empty() ? spv::LoopControlMask::MaskNone
: translateLoopAttribute(forStmt, *attrs.front());
const Stmt *initStmt = forStmt->getInit();
const Stmt *body = forStmt->getBody();
const Expr *check = forStmt->getCond();
bool checkHasShortcircuitedOp = stmtTreeContainsShortCircuitedOp(check);
// Create basic blocks.
auto *checkBB = spvBuilder.createBasicBlock("for.check");
auto *headerBB = checkHasShortcircuitedOp
? spvBuilder.createBasicBlock("for.header")
: checkBB;
auto *bodyBB = spvBuilder.createBasicBlock("for.body");
auto *continueBB = spvBuilder.createBasicBlock("for.continue");
auto *mergeBB = spvBuilder.createBasicBlock("for.merge");
// Make sure any continue statements branch to the continue block, and any
// break statements branch to the merge block.
continueStack.push(continueBB);
breakStack.push(mergeBB);
// Process the <init> block.
if (initStmt) {
doStmt(initStmt);
}
spvBuilder.createBranch(
headerBB, check ? check->getLocStart() : forStmt->getLocStart(), nullptr,
nullptr, spv::LoopControlMask::MaskNone,
initStmt ? initStmt->getSourceRange()
: SourceRange(forStmt->getLocStart(), forStmt->getLocStart()));
spvBuilder.addSuccessor(headerBB);
if (checkHasShortcircuitedOp) {
// Process the <header> block.
spvBuilder.setInsertPoint(headerBB);
spvBuilder.createBranch(
checkBB,
check ? check->getLocStart()
: (body ? body->getLocStart() : forStmt->getLocStart()),
mergeBB, continueBB, loopControl,
check ? check->getSourceRange()
: (initStmt ? initStmt->getSourceRange()
: SourceRange(forStmt->getLocStart(),
forStmt->getLocStart())));
spvBuilder.addSuccessor(checkBB);
    // The current basic block has an OpLoopMerge instruction. We need to set
    // its continue and merge targets.
spvBuilder.setContinueTarget(continueBB);
spvBuilder.setMergeTarget(mergeBB);
// Process the <check> block.
spvBuilder.setInsertPoint(checkBB);
SpirvInstruction *condition = doExpr(check);
spvBuilder.createConditionalBranch(
condition, bodyBB, mergeBB,
check ? check->getLocEnd()
: (body ? body->getLocStart() : forStmt->getLocStart()),
nullptr, nullptr, spv::SelectionControlMask::MaskNone,
spv::LoopControlMask::MaskNone,
check ? check->getSourceRange()
: (initStmt ? initStmt->getSourceRange()
: SourceRange(forStmt->getLocStart(),
forStmt->getLocStart())));
spvBuilder.addSuccessor(bodyBB);
spvBuilder.addSuccessor(mergeBB);
} else {
// In the case of simple or empty conditions, we can use a
// single block for <check> and <header>.
spvBuilder.setInsertPoint(checkBB);
SpirvInstruction *condition = nullptr;
if (check) {
condition = doExpr(check);
} else {
condition = spvBuilder.getConstantBool(true);
}
spvBuilder.createConditionalBranch(
condition, bodyBB, mergeBB,
check ? check->getLocEnd()
: (body ? body->getLocStart() : forStmt->getLocStart()),
mergeBB, continueBB, spv::SelectionControlMask::MaskNone, loopControl,
check ? check->getSourceRange()
: (initStmt ? initStmt->getSourceRange()
: SourceRange(forStmt->getLocStart(),
forStmt->getLocStart())));
spvBuilder.addSuccessor(bodyBB);
spvBuilder.addSuccessor(mergeBB);
    // The current basic block has an OpLoopMerge instruction. We need to set
    // its continue and merge targets.
spvBuilder.setContinueTarget(continueBB);
spvBuilder.setMergeTarget(mergeBB);
}
// Process the <body> block.
spvBuilder.setInsertPoint(bodyBB);
if (body) {
doStmt(body);
}
const Expr *cont = forStmt->getInc();
if (!spvBuilder.isCurrentBasicBlockTerminated())
spvBuilder.createBranch(
continueBB, forStmt->getLocEnd(), nullptr, nullptr,
spv::LoopControlMask::MaskNone,
cont ? cont->getSourceRange()
: SourceRange(forStmt->getLocStart(), forStmt->getLocStart()));
spvBuilder.addSuccessor(continueBB);
// Process the <continue> block. It will jump back to the header.
spvBuilder.setInsertPoint(continueBB);
if (cont) {
doExpr(cont);
}
spvBuilder.createBranch(
headerBB, forStmt->getLocEnd(), nullptr, nullptr,
spv::LoopControlMask::MaskNone,
cont ? cont->getSourceRange()
: SourceRange(forStmt->getLocStart(), forStmt->getLocStart()));
spvBuilder.addSuccessor(headerBB);
// Set insertion point to the <merge> block for subsequent statements.
spvBuilder.setInsertPoint(mergeBB);
// Done with the current scope's continue block and merge block.
continueStack.pop();
breakStack.pop();
}
void SpirvEmitter::doIfStmt(const IfStmt *ifStmt,
llvm::ArrayRef<const Attr *> attrs) {
// if statements are composed of:
// if (<check>) { <then> } else { <else> }
//
// To translate if statements, we'll need to emit the <check> expressions
// in the current basic block, and then create separate basic blocks for
// <then> and <else>. Additionally, we'll need a <merge> block as per
  // SPIR-V's structured control flow requirements. Depending on whether the
  // else branch exists, the final CFG should normally be like the
// following. Exceptions will occur with non-local exits like loop breaks
// or early returns.
// +-------+ +-------+
// | check | | check |
// +-------+ +-------+
// | |
// +-------+-------+ +-----+-----+
// | true | false | true | false
// v v or v |
// +------+ +------+ +------+ |
// | then | | else | | then | |
// +------+ +------+ +------+ |
// | | | v
// | +-------+ | | +-------+
// +-> | merge | <-+ +---> | merge |
// +-------+ +-------+
{ // Try to see if we can const-eval the condition
bool condition = false;
if (ifStmt->getCond()->EvaluateAsBooleanCondition(condition, astContext)) {
if (condition) {
doStmt(ifStmt->getThen());
} else if (ifStmt->getElse()) {
doStmt(ifStmt->getElse());
}
return;
}
}
auto selectionControl = spv::SelectionControlMask::MaskNone;
if (!attrs.empty()) {
const Attr *attribute = attrs.front();
switch (attribute->getKind()) {
case attr::HLSLBranch:
selectionControl = spv::SelectionControlMask::DontFlatten;
break;
case attr::HLSLFlatten:
selectionControl = spv::SelectionControlMask::Flatten;
break;
default:
// warning emitted in hlsl::ProcessStmtAttributeForHLSL
break;
}
}
if (const auto *declStmt = ifStmt->getConditionVariableDeclStmt())
doDeclStmt(declStmt);
// First emit the instruction for evaluating the condition.
auto *cond = ifStmt->getCond();
auto *condition = doExpr(cond);
// Then we need to emit the instruction for the conditional branch.
// We'll need the <label-id> for the then/else/merge block to do so.
const bool hasElse = ifStmt->getElse() != nullptr;
auto *thenBB = spvBuilder.createBasicBlock("if.true");
auto *mergeBB = spvBuilder.createBasicBlock("if.merge");
auto *elseBB = hasElse ? spvBuilder.createBasicBlock("if.false") : mergeBB;
// Create the branch instruction. This will end the current basic block.
const auto *then = ifStmt->getThen();
spvBuilder.createConditionalBranch(
condition, thenBB, elseBB, then->getLocStart(), mergeBB,
/*continue*/ 0, selectionControl, spv::LoopControlMask::MaskNone,
cond->getSourceRange());
spvBuilder.addSuccessor(thenBB);
spvBuilder.addSuccessor(elseBB);
// The current basic block has the OpSelectionMerge instruction. We need
// to record its merge target.
spvBuilder.setMergeTarget(mergeBB);
// Handle the then branch
spvBuilder.setInsertPoint(thenBB);
doStmt(then);
if (!spvBuilder.isCurrentBasicBlockTerminated())
spvBuilder.createBranch(mergeBB, ifStmt->getLocEnd(), nullptr, nullptr,
spv::LoopControlMask::MaskNone,
SourceRange(then->getLocEnd(), then->getLocEnd()));
spvBuilder.addSuccessor(mergeBB);
// Handle the else branch (if exists)
if (hasElse) {
spvBuilder.setInsertPoint(elseBB);
const auto *elseStmt = ifStmt->getElse();
doStmt(elseStmt);
if (!spvBuilder.isCurrentBasicBlockTerminated())
spvBuilder.createBranch(
mergeBB, elseStmt->getLocEnd(), nullptr, nullptr,
spv::LoopControlMask::MaskNone,
SourceRange(elseStmt->getLocEnd(), elseStmt->getLocEnd()));
spvBuilder.addSuccessor(mergeBB);
}
// From now on, we'll emit instructions into the merge block.
spvBuilder.setInsertPoint(mergeBB);
}
void SpirvEmitter::doReturnStmt(const ReturnStmt *stmt) {
const auto *retVal = stmt->getRetValue();
bool returnsVoid = curFunction->getReturnType().getTypePtr()->isVoidType();
if (!returnsVoid) {
assert(retVal);
// Update counter variable associated with function returns
tryToAssignCounterVar(curFunction, retVal);
auto *retInfo = loadIfGLValue(retVal);
if (!retInfo)
return;
auto retType = retVal->getType();
if (retInfo->getLayoutRule() != SpirvLayoutRule::Void &&
retType->isStructureType()) {
// We are returning some value from a non-Function storage class. Need to
// create a temporary variable to "convert" the value to Function storage
// class and then return.
auto *tempVar =
spvBuilder.addFnVar(retType, retVal->getLocEnd(), "temp.var.ret");
storeValue(tempVar, retInfo, retType, retVal->getLocEnd());
spvBuilder.createReturnValue(
spvBuilder.createLoad(retType, tempVar, retVal->getLocEnd()),
stmt->getReturnLoc());
} else {
spvBuilder.createReturnValue(retInfo, stmt->getReturnLoc(),
{stmt->getReturnLoc(), retVal->getLocEnd()});
}
} else {
if (retVal) {
loadIfGLValue(retVal);
}
spvBuilder.createReturn(stmt->getReturnLoc());
}
  // We are translating a ReturnStmt, so we should be in some function's body.
assert(curFunction->hasBody());
// If this return statement is the last statement in the function, then
  // we have no more work to do.
if (cast<CompoundStmt>(curFunction->getBody())->body_back() == stmt)
return;
// Some statements that alter the control flow (break, continue, return, and
// discard), require creation of a new basic block to hold any statement that
// may follow them. In this case, the newly created basic block will contain
// any statement that may come after an early return.
auto *newBB = spvBuilder.createBasicBlock();
spvBuilder.setInsertPoint(newBB);
}
void SpirvEmitter::doBreakStmt(const BreakStmt *breakStmt) {
assert(!spvBuilder.isCurrentBasicBlockTerminated());
auto *breakTargetBB = breakStack.top();
spvBuilder.addSuccessor(breakTargetBB);
spvBuilder.createBranch(breakTargetBB, breakStmt->getLocStart());
// Some statements that alter the control flow (break, continue, return, and
// discard), require creation of a new basic block to hold any statement that
// may follow them. For example: StmtB and StmtC below are put inside a new
// basic block which is unreachable.
//
// while (true) {
// StmtA;
// break;
// StmtB;
// StmtC;
// }
auto *newBB = spvBuilder.createBasicBlock();
spvBuilder.setInsertPoint(newBB);
}
void SpirvEmitter::doSwitchStmt(const SwitchStmt *switchStmt,
llvm::ArrayRef<const Attr *> attrs) {
// Switch statements are composed of:
// switch (<condition variable>) {
// <CaseStmt>
// <CaseStmt>
// <CaseStmt>
// <DefaultStmt> (optional)
// }
//
// +-------+
// | check |
// +-------+
// |
// +-------+-------+----------------+---------------+
// | 1 | 2 | 3 | (others)
// v v v v
// +-------+ +-------------+ +-------+ +------------+
// | case1 | | case2 | | case3 | ... | default |
// | | |(fallthrough)|---->| | | (optional) |
// +-------+ |+------------+ +-------+ +------------+
// | | |
// | | |
// | +-------+ | |
// | | | <--------------------+ |
// +-> | merge | |
// | | <-------------------------------------+
// +-------+
  // If no attributes are given, or if the "forcecase" attribute was provided,
  // we'll do our best to use OpSwitch if possible.
// If any of the cases compares to a variable (rather than an integer
// literal), we cannot use OpSwitch because OpSwitch expects literal
// numbers as parameters.
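  // For example (illustrative):
  //   switch (x) { case 0: ...; case kMode: ...; }
  // where 'kMode' is a constant that is not an integer literal in the AST;
  // such a switch is lowered to a chain of if statements instead of OpSwitch.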
const bool isAttrForceCase =
!attrs.empty() && attrs.front()->getKind() == attr::HLSLForceCase;
const bool canUseSpirvOpSwitch =
(attrs.empty() || isAttrForceCase) &&
allSwitchCasesAreIntegerLiterals(switchStmt->getBody());
if (isAttrForceCase && !canUseSpirvOpSwitch &&
!spirvOptions.noWarnIgnoredFeatures) {
emitWarning("ignored 'forcecase' attribute for the switch statement "
"since one or more case values are not integer literals",
switchStmt->getLocStart());
}
if (canUseSpirvOpSwitch)
processSwitchStmtUsingSpirvOpSwitch(switchStmt);
else
processSwitchStmtUsingIfStmts(switchStmt);
}
SpirvInstruction *
SpirvEmitter::doArraySubscriptExpr(const ArraySubscriptExpr *expr,
SourceRange rangeOverride) {
Expr *base = const_cast<Expr *>(expr->getBase()->IgnoreParenLValueCasts());
auto *info = loadIfAliasVarRef(base);
SourceRange range =
(rangeOverride != SourceRange()) ? rangeOverride : expr->getSourceRange();
if (!info) {
return info;
}
// The index into an array must be an integer number.
const auto *idxExpr = expr->getIdx();
const auto idxExprType = idxExpr->getType();
SpirvInstruction *thisIndex = loadIfGLValue(idxExpr);
if (!idxExprType->isIntegerType() || idxExprType->isBooleanType()) {
thisIndex = castToInt(thisIndex, idxExprType, astContext.UnsignedIntTy,
idxExpr->getExprLoc());
}
llvm::SmallVector<SpirvInstruction *, 4> indices = {thisIndex};
SpirvInstruction *loadVal =
derefOrCreatePointerToValue(base->getType(), info, expr->getType(),
indices, base->getExprLoc(), range);
// TODO(#6259): This maintains the same incorrect behaviour as before.
// When GetAttributeAtVertex is used, the array will be duplicated instead
// of duplicating the elements of the array. This means that the access chain
// feeding this one needs to be marked no interpolation, but this access chain
// does not. However, this is still wrong in cases that were wrong before.
loadVal->setNoninterpolated(false);
return loadVal;
}
SpirvInstruction *SpirvEmitter::doBinaryOperator(const BinaryOperator *expr) {
const auto opcode = expr->getOpcode();
// Handle assignment first since we need to evaluate rhs before lhs.
// For other binary operations, we need to evaluate lhs before rhs.
if (opcode == BO_Assign) {
// Update counter variable associated with lhs of assignments
tryToAssignCounterVar(expr->getLHS(), expr->getRHS());
return processAssignment(expr->getLHS(), loadIfGLValue(expr->getRHS()),
/*isCompoundAssignment=*/false, nullptr,
expr->getSourceRange());
}
  // Try to optimize the floatMxN * float and floatN * float cases.
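  // For example, 'v * s' with 'float3 v' and scalar 'float s' can be
  // lowered to a single OpVectorTimesScalar (and matrix scaling similarly
  // to OpMatrixTimesScalar) instead of splatting 's' into a vector first.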
if (opcode == BO_Mul) {
if (auto *result = tryToGenFloatMatrixScale(expr))
return result;
if (auto *result = tryToGenFloatVectorScale(expr))
return result;
}
return processBinaryOp(expr->getLHS(), expr->getRHS(), opcode,
expr->getLHS()->getType(), expr->getType(),
expr->getSourceRange(), expr->getOperatorLoc());
}
SpirvInstruction *SpirvEmitter::doCallExpr(const CallExpr *callExpr,
SourceRange rangeOverride) {
if (const auto *operatorCall = dyn_cast<CXXOperatorCallExpr>(callExpr)) {
if (const auto *cxxMethodDecl =
dyn_cast<CXXMethodDecl>(operatorCall->getCalleeDecl())) {
QualType parentType =
QualType(cxxMethodDecl->getParent()->getTypeForDecl(), 0);
if (hlsl::IsUserDefinedRecordType(parentType)) {
// If the parent is a user-defined record type
return processCall(callExpr);
}
}
return doCXXOperatorCallExpr(operatorCall, rangeOverride);
}
if (const auto *memberCall = dyn_cast<CXXMemberCallExpr>(callExpr))
return doCXXMemberCallExpr(memberCall);
auto funcDecl = callExpr->getDirectCallee();
if (funcDecl) {
if (funcDecl->hasAttr<VKInstructionExtAttr>())
return processSpvIntrinsicCallExpr(callExpr);
else if (funcDecl->hasAttr<VKTypeDefExtAttr>())
return processSpvIntrinsicTypeDef(callExpr);
}
// Intrinsic functions such as 'dot' or 'mul'
if (hlsl::IsIntrinsicOp(funcDecl)) {
return processIntrinsicCallExpr(callExpr);
}
// Handle 'vk::RawBufferLoad()'
if (isVkRawBufferLoadIntrinsic(funcDecl)) {
return processRawBufferLoad(callExpr);
}
// Normal standalone functions
return processCall(callExpr);
}
SpirvInstruction *SpirvEmitter::getBaseOfMemberFunction(
QualType objectType, SpirvInstruction *objInstr,
const CXXMethodDecl *memberFn, SourceLocation loc) {
// If objectType is different from the parent of memberFn, memberFn should be
// defined in a base struct/class of objectType. We create OpAccessChain with
// index 0 while iterating bases of objectType until we find the base with
// the definition of memberFn.
if (const auto *ptrType = objectType->getAs<PointerType>()) {
if (const auto *recordType =
ptrType->getPointeeType()->getAs<RecordType>()) {
const auto *parentDeclOfMemberFn = memberFn->getParent();
if (recordType->getDecl() != parentDeclOfMemberFn) {
const auto *cxxRecordDecl =
dyn_cast<CXXRecordDecl>(recordType->getDecl());
auto *zero = spvBuilder.getConstantInt(astContext.UnsignedIntTy,
llvm::APInt(32, 0));
for (auto baseItr = cxxRecordDecl->bases_begin(),
itrEnd = cxxRecordDecl->bases_end();
baseItr != itrEnd; baseItr++) {
const auto *baseType = baseItr->getType()->getAs<RecordType>();
objectType = astContext.getPointerType(baseType->desugar());
objInstr =
spvBuilder.createAccessChain(objectType, objInstr, {zero}, loc);
if (baseType->getDecl() == parentDeclOfMemberFn)
return objInstr;
}
}
}
}
return nullptr;
}
SpirvInstruction *SpirvEmitter::processCall(const CallExpr *callExpr) {
const FunctionDecl *callee = getCalleeDefinition(callExpr);
// Note that we always want the definition because Stmts/Exprs in the
// function body reference the parameters in the definition.
if (!callee) {
emitError("found undefined function", callExpr->getExprLoc());
return nullptr;
}
const auto paramTypeMatchesArgType = [](QualType paramType,
QualType argType) {
if (argType == paramType)
return true;
if (const auto *refType = paramType->getAs<ReferenceType>())
paramType = refType->getPointeeType();
auto argUnqualifiedType = argType->getUnqualifiedDesugaredType();
auto paramUnqualifiedType = paramType->getUnqualifiedDesugaredType();
if (argUnqualifiedType == paramUnqualifiedType)
return true;
return false;
};
const auto numParams = callee->getNumParams();
bool isNonStaticMemberCall = false;
bool isOperatorOverloading = false;
QualType objectType = {}; // Type of the object (if exists)
SpirvInstruction *objInstr = nullptr; // EvalInfo for the object (if exists)
const Expr *object;
llvm::SmallVector<SpirvInstruction *, 4> vars; // Variables for function call
llvm::SmallVector<bool, 4> isTempVar; // Temporary variable or not
llvm::SmallVector<SpirvInstruction *, 4> args; // Evaluated arguments
if (const auto *memberCall = dyn_cast<CXXMemberCallExpr>(callExpr)) {
const auto *memberFn = cast<CXXMethodDecl>(memberCall->getCalleeDecl());
isNonStaticMemberCall = !memberFn->isStatic();
if (isNonStaticMemberCall) {
// For non-static member calls, evaluate the object and pass it as the
// first argument.
object = memberCall->getImplicitObjectArgument();
object = object->IgnoreParenNoopCasts(astContext);
// Update counter variable associated with the implicit object
tryToAssignCounterVar(getOrCreateDeclForMethodObject(memberFn), object);
objectType = object->getType();
objInstr = doExpr(object);
if (auto *accessToBaseInstr = getBaseOfMemberFunction(
objectType, objInstr, memberFn, memberCall->getExprLoc())) {
objInstr = accessToBaseInstr;
objectType = accessToBaseInstr->getAstResultType();
}
}
} else if (const auto *operatorCallExpr =
dyn_cast<CXXOperatorCallExpr>(callExpr)) {
isOperatorOverloading = true;
isNonStaticMemberCall = true;
// For overloaded operator calls, the first argument is considered as the
// object.
object = operatorCallExpr->getArg(0);
object = object->IgnoreParenNoopCasts(astContext);
objectType = object->getType();
objInstr = doExpr(object);
}
if (objInstr != nullptr) {
// If not already a variable, we need to create a temporary variable and
// pass the object pointer to the function. Example:
// getObject().objectMethod();
// Also, any parameter passed to the member function must be of Function
// storage class.
if (objInstr->isRValue()) {
args.push_back(createTemporaryVar(
objectType, getAstTypeName(objectType),
// May need to load to use as initializer
loadIfGLValue(object, objInstr), object->getLocStart()));
} else {
      // Based on the SPIR-V spec, a function parameter must always be in
      // Function scope. If we pass a non-Function-scope argument, we need
      // legalization.
if (objInstr->getStorageClass() != spv::StorageClass::Function ||
!isMemoryObjectDeclaration(objInstr))
needsLegalization = true;
args.push_back(objInstr);
}
    // We do not need to create a new temporary variable for the 'this'
    // object. Use the evaluated argument.
vars.push_back(args.back());
isTempVar.push_back(false);
}
// Evaluate parameters
for (uint32_t i = 0; i < numParams; ++i) {
    // Arguments for the overloaded operator include the object itself. The
    // actual arguments start from the second one.
const uint32_t argIndex = i + isOperatorOverloading;
// We want the argument variable here so that we can write back to it
// later. We will do the OpLoad of this argument manually. So ignore
// the LValueToRValue implicit cast here.
auto *arg = callExpr->getArg(argIndex)->IgnoreParenLValueCasts();
const auto *param = callee->getParamDecl(i);
const auto paramType = param->getType();
if (isResourceDescriptorHeap(paramType) ||
isSamplerDescriptorHeap(paramType)) {
emitError(
"Resource/sampler heaps are not allowed as function parameters.",
param->getLocStart());
return nullptr;
}
// Get the evaluation info if this argument is referencing some variable
// *as a whole*, in which case we can avoid creating the temporary variable
    // for it if it can act as an out parameter.
SpirvInstruction *argInfo = nullptr;
if (const auto *declRefExpr = dyn_cast<DeclRefExpr>(arg)) {
argInfo = declIdMapper.getDeclEvalInfo(declRefExpr->getDecl(),
arg->getLocStart());
}
auto *argInst = doExpr(arg);
bool isArgGlobalVarWithResourceType =
argInfo && argInfo->getStorageClass() != spv::StorageClass::Function &&
isResourceType(paramType);
    // If argInfo is nullptr and argInst is an rvalue, we do not have a proper
    // pointer to pass to the function. We need a temporary variable in that
    // case.
//
// If we have an 'out/inout' resource as function argument, we need to
// create a temporary variable for it because the function definition
    // expects a pointer-to-pointer argument for resources, which will be
// resolved by legalization.
if ((argInfo || (argInst && !argInst->isRValue())) &&
canActAsOutParmVar(param) && !isArgGlobalVarWithResourceType &&
paramTypeMatchesArgType(paramType, arg->getType())) {
      // Based on the SPIR-V spec, a function parameter must always be in
      // Function scope. In addition, we must pass a memory object declaration
      // as the argument. If we pass an argument that is not Function scope or
      // not a memory object declaration, we need legalization.
if (!argInfo || argInfo->getStorageClass() != spv::StorageClass::Function)
needsLegalization = true;
isTempVar.push_back(false);
args.push_back(argInst);
vars.push_back(argInfo ? argInfo : argInst);
} else {
// We need to create variables for holding the values to be used as
// arguments. The variables themselves are of pointer types.
const QualType varType =
declIdMapper.getTypeAndCreateCounterForPotentialAliasVar(param);
const std::string varName = "param.var." + param->getNameAsString();
// Temporary "param.var.*" variables are used for OpFunctionCall purposes.
      // 'precise' attribute on function parameters only affects computations
// inside the function, not the variables at the call sites. Therefore, we
// do not need to mark the "param.var.*" variables as precise.
const bool isPrecise = false;
const bool isNoInterp = param->hasAttr<HLSLNoInterpolationAttr>() ||
(argInst && argInst->isNoninterpolated());
auto *tempVar = spvBuilder.addFnVar(varType, arg->getLocStart(), varName,
isPrecise, isNoInterp);
vars.push_back(tempVar);
isTempVar.push_back(true);
args.push_back(argInst);
// Update counter variable associated with function parameters
tryToAssignCounterVar(param, arg);
// Manually load the argument here
auto *rhsVal = loadIfGLValue(arg, args.back());
auto rhsRange = arg->getSourceRange();
// The AST does not include cast nodes to and from the function parameter
// type for 'out' and 'inout' cases. Example:
//
// void foo(out half3 param) {...}
// void main() { float3 arg; foo(arg); }
//
// In such cases, we first do a manual cast before passing the argument to
// the function. And we will cast back the results once the function call
// has returned.
if (canActAsOutParmVar(param) &&
!paramTypeMatchesArgType(paramType, arg->getType())) {
if (const auto *refType = paramType->getAs<ReferenceType>()) {
QualType toType = refType->getPointeeType();
if (isScalarType(rhsVal->getAstResultType())) {
rhsVal =
splatScalarToGenerate(toType, rhsVal, SpirvLayoutRule::Void);
} else {
rhsVal = castToType(rhsVal, rhsVal->getAstResultType(), toType,
arg->getLocStart(), rhsRange);
}
}
}
// Initialize the temporary variables using the contents of the arguments
storeValue(tempVar, rhsVal, paramType, arg->getLocStart(), rhsRange);
}
}
assert(vars.size() == isTempVar.size());
assert(vars.size() == args.size());
// Push the callee into the work queue if it is not there.
addFunctionToWorkQueue(spvContext.getCurrentShaderModelKind(), callee,
/*isEntryFunction*/ false);
const QualType retType =
declIdMapper.getTypeAndCreateCounterForPotentialAliasVar(callee);
// Get or forward declare the function <result-id>
SpirvFunction *func = declIdMapper.getOrRegisterFn(callee);
auto *retVal = spvBuilder.createFunctionCall(
retType, func, vars, callExpr->getCallee()->getExprLoc(),
callExpr->getSourceRange());
// Go through all parameters and write those marked as out/inout
for (uint32_t i = 0; i < numParams; ++i) {
const auto *param = callee->getParamDecl(i);
const auto paramType = param->getType();
// If it calls a non-static member function, the object itself is argument
// 0, and therefore all other argument positions are shifted by 1.
const uint32_t index = i + isNonStaticMemberCall;
    // A resource used as a function parameter is never passed by copy. As a
    // result, even if the function parameter is marked as 'out' or 'inout',
    // there is no need to copy the results back into the resource after the
    // function call.
if (isTempVar[index] && canActAsOutParmVar(param) &&
!isResourceType(paramType)) {
      // Arguments for the overloaded operator include the object itself. The
      // actual arguments start from the second one.
const uint32_t argIndex = i + isOperatorOverloading;
const auto *arg = callExpr->getArg(argIndex);
SpirvInstruction *value =
spvBuilder.createLoad(paramType, vars[index], arg->getLocStart());
// Now we want to assign 'value' to arg. But first, in rare cases when
// using 'out' or 'inout' where the parameter and argument have a type
// mismatch, we need to first cast 'value' to the type of 'arg' because
// the AST will not include a cast node.
if (!paramTypeMatchesArgType(paramType, arg->getType())) {
if (const auto *refType = paramType->getAs<ReferenceType>()) {
QualType elementType;
QualType fromType = refType->getPointeeType();
if (isVectorType(fromType, &elementType) &&
isScalarType(arg->getType())) {
value = spvBuilder.createCompositeExtract(
elementType, value, {0}, value->getSourceLocation());
fromType = elementType;
}
value =
castToType(value, fromType, arg->getType(), arg->getLocStart());
}
}
processAssignment(arg, value, false, args[index]);
}
}
return retVal;
}
SpirvInstruction *SpirvEmitter::doCastExpr(const CastExpr *expr,
SourceRange rangeOverride) {
const Expr *subExpr = expr->getSubExpr();
const QualType subExprType = subExpr->getType();
const QualType toType = expr->getType();
const auto srcLoc = expr->getExprLoc();
SourceRange range =
(rangeOverride != SourceRange()) ? rangeOverride : expr->getSourceRange();
  // The AST type for a descriptor heap is not well defined. This means we
  // already needed to look at the destination type to generate the source
  // type.
// This makes implicit casts from heaps useless, and we can ignore them.
// If you want to remove this check, the flat conversion heap->type needs to
// be implemented, which would mostly duplicate the initial heap creation
// code.
if (isResourceDescriptorHeap(subExprType) ||
isSamplerDescriptorHeap(subExprType)) {
return doExpr(subExpr, range);
}
switch (expr->getCastKind()) {
case CastKind::CK_LValueToRValue:
return loadIfGLValue(subExpr, range);
case CastKind::CK_NoOp:
return doExpr(subExpr, range);
case CastKind::CK_IntegralCast:
case CastKind::CK_FloatingToIntegral:
case CastKind::CK_HLSLCC_IntegralCast:
case CastKind::CK_HLSLCC_FloatingToIntegral: {
    // Integer literals in the AST are represented using a 64-bit APInt and
    // then implicitly cast into the expected bitwidth. We need special
    // treatment of integer literals here because generating a 64-bit constant
    // and then explicitly casting it in SPIR-V requires the Int64 capability.
    // We should avoid introducing unnecessary capabilities whenever possible.
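    // For example (illustrative): for 'uint x = 5;', the literal '5' is a
    // 64-bit APInt in the AST; folding it directly to a 32-bit constant
    // here avoids emitting a 64-bit constant plus an explicit truncation.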
if (auto *value =
constEvaluator.tryToEvaluateAsConst(expr, isSpecConstantMode)) {
value->setRValue();
return value;
}
auto *value = castToInt(loadIfGLValue(subExpr), subExprType, toType,
subExpr->getLocStart(), range);
if (!value)
return nullptr;
value->setRValue();
return value;
}
case CastKind::CK_FloatingCast:
case CastKind::CK_IntegralToFloating:
case CastKind::CK_HLSLCC_FloatingCast:
case CastKind::CK_HLSLCC_IntegralToFloating: {
// First try to see if we can do constant folding for floating point
// numbers like what we are doing for integers in the above.
if (auto *value =
constEvaluator.tryToEvaluateAsConst(expr, isSpecConstantMode)) {
value->setRValue();
return value;
}
auto *value = castToFloat(loadIfGLValue(subExpr), subExprType, toType,
subExpr->getLocStart(), range);
if (!value)
return nullptr;
value->setRValue();
return value;
}
case CastKind::CK_IntegralToBoolean:
case CastKind::CK_FloatingToBoolean:
case CastKind::CK_HLSLCC_IntegralToBoolean:
case CastKind::CK_HLSLCC_FloatingToBoolean: {
// First try to see if we can do constant folding.
if (auto *value =
constEvaluator.tryToEvaluateAsConst(expr, isSpecConstantMode)) {
value->setRValue();
return value;
}
auto *value = castToBool(loadIfGLValue(subExpr), subExprType, toType,
subExpr->getLocStart(), range);
if (!value)
return nullptr;
value->setRValue();
return value;
}
case CastKind::CK_HLSLVectorSplat: {
const size_t size = hlsl::GetHLSLVecSize(expr->getType());
return createVectorSplat(subExpr, size, range);
}
case CastKind::CK_HLSLVectorTruncationCast: {
const QualType toVecType = toType;
const QualType elemType = hlsl::GetHLSLVecElementType(toType);
const auto toSize = hlsl::GetHLSLVecSize(toType);
auto *composite = doExpr(subExpr, range);
llvm::SmallVector<SpirvInstruction *, 4> elements;
for (uint32_t i = 0; i < toSize; ++i) {
elements.push_back(spvBuilder.createCompositeExtract(
elemType, composite, {i}, expr->getExprLoc(), range));
}
auto *value = elements.front();
if (toSize > 1) {
value = spvBuilder.createCompositeConstruct(toVecType, elements,
expr->getExprLoc(), range);
}
if (!value)
return nullptr;
value->setRValue();
return value;
}
case CastKind::CK_HLSLVectorToScalarCast: {
    // The underlying value should already be a vector of size 1.
assert(hlsl::GetHLSLVecSize(subExprType) == 1);
return doExpr(subExpr, range);
}
case CastKind::CK_HLSLVectorToMatrixCast: {
    // If the target type is already a 1xN or Mx1 matrix type, we just return
    // the underlying vector.
if (is1xNMatrix(toType) || isMx1Matrix(toType))
return doExpr(subExpr, range);
// A vector can have no more than 4 elements. The only remaining case
    // is casting from a size-4 vector to a 2x2 matrix.
auto *vec = loadIfGLValue(subExpr, range);
QualType elemType = {};
uint32_t rowCount = 0, colCount = 0;
const bool isMat = isMxNMatrix(toType, &elemType, &rowCount, &colCount);
assert(isMat && rowCount == 2 && colCount == 2);
(void)isMat;
QualType vec2Type = astContext.getExtVectorType(elemType, 2);
auto *subVec1 = spvBuilder.createVectorShuffle(vec2Type, vec, vec, {0, 1},
expr->getLocStart(), range);
auto *subVec2 = spvBuilder.createVectorShuffle(vec2Type, vec, vec, {2, 3},
expr->getLocStart(), range);
auto *mat = spvBuilder.createCompositeConstruct(toType, {subVec1, subVec2},
expr->getLocStart(), range);
if (!mat)
return nullptr;
mat->setRValue();
return mat;
}
case CastKind::CK_HLSLMatrixSplat: {
// From scalar to matrix
uint32_t rowCount = 0, colCount = 0;
hlsl::GetHLSLMatRowColCount(toType, rowCount, colCount);
    // Handle degenerate cases first
if (rowCount == 1 && colCount == 1)
return doExpr(subExpr, range);
if (colCount == 1)
return createVectorSplat(subExpr, rowCount, range);
const auto vecSplat = createVectorSplat(subExpr, colCount, range);
if (rowCount == 1)
return vecSplat;
if (isa<SpirvConstant>(vecSplat)) {
llvm::SmallVector<SpirvConstant *, 4> vectors(
size_t(rowCount), cast<SpirvConstant>(vecSplat));
auto *value = spvBuilder.getConstantComposite(toType, vectors);
if (!value)
return nullptr;
value->setRValue();
return value;
} else {
llvm::SmallVector<SpirvInstruction *, 4> vectors(size_t(rowCount),
vecSplat);
auto *value = spvBuilder.createCompositeConstruct(
toType, vectors, expr->getLocEnd(), range);
if (!value)
return nullptr;
value->setRValue();
return value;
}
}
case CastKind::CK_HLSLMatrixTruncationCast: {
const QualType srcType = subExprType;
auto *src = doExpr(subExpr, range);
const QualType elemType = hlsl::GetHLSLMatElementType(srcType);
llvm::SmallVector<uint32_t, 4> indexes;
// It is possible that the source matrix is in fact a vector.
// Example 1: Truncate float1x3 --> float1x2.
// Example 2: Truncate float1x3 --> float1x1.
// The front-end disallows float1x3 --> float2x1.
{
uint32_t srcVecSize = 0, dstVecSize = 0;
if (isVectorType(srcType, nullptr, &srcVecSize) && isScalarType(toType)) {
auto *val = spvBuilder.createCompositeExtract(
toType, src, {0}, expr->getLocStart(), range);
if (!val)
return nullptr;
val->setRValue();
return val;
}
if (isVectorType(srcType, nullptr, &srcVecSize) &&
isVectorType(toType, nullptr, &dstVecSize)) {
for (uint32_t i = 0; i < dstVecSize; ++i)
indexes.push_back(i);
auto *val = spvBuilder.createVectorShuffle(toType, src, src, indexes,
expr->getLocStart(), range);
if (!val)
return nullptr;
val->setRValue();
return val;
}
}
uint32_t srcRows = 0, srcCols = 0, dstRows = 0, dstCols = 0;
hlsl::GetHLSLMatRowColCount(srcType, srcRows, srcCols);
hlsl::GetHLSLMatRowColCount(toType, dstRows, dstCols);
const QualType srcRowType = astContext.getExtVectorType(elemType, srcCols);
const QualType dstRowType = astContext.getExtVectorType(elemType, dstCols);
// Indexes to pass to OpVectorShuffle
for (uint32_t i = 0; i < dstCols; ++i)
indexes.push_back(i);
llvm::SmallVector<SpirvInstruction *, 4> extractedVecs;
for (uint32_t row = 0; row < dstRows; ++row) {
// Extract a row
SpirvInstruction *rowInstr = spvBuilder.createCompositeExtract(
srcRowType, src, {row}, expr->getExprLoc(), range);
// Extract the necessary columns from that row.
// The front-end ensures dstCols <= srcCols.
// If dstCols equals srcCols, we can use the whole row directly.
if (dstCols == 1) {
rowInstr = spvBuilder.createCompositeExtract(
elemType, rowInstr, {0}, expr->getLocStart(), range);
} else if (dstCols < srcCols) {
rowInstr =
spvBuilder.createVectorShuffle(dstRowType, rowInstr, rowInstr,
indexes, expr->getLocStart(), range);
}
extractedVecs.push_back(rowInstr);
}
auto *val = extractedVecs.front();
if (extractedVecs.size() > 1) {
val = spvBuilder.createCompositeConstruct(toType, extractedVecs,
expr->getExprLoc(), range);
}
if (!val)
return nullptr;
val->setRValue();
return val;
}
case CastKind::CK_HLSLMatrixToScalarCast: {
    // The underlying value should already be a 1x1 matrix.
assert(is1x1Matrix(subExprType));
return doExpr(subExpr, range);
}
case CastKind::CK_HLSLMatrixToVectorCast: {
    // If the underlying matrix is Mx1 or 1xM for M in {1, 2, 3, 4}, we can
    // return the underlying matrix because it'll be evaluated as a vector by
    // default.
if (is1x1Matrix(subExprType) || is1xNMatrix(subExprType) ||
isMx1Matrix(subExprType))
return doExpr(subExpr, range);
// A vector can have no more than 4 elements. The only remaining case
// is casting from a 2x2 matrix to a vector of size 4.
auto *mat = loadIfGLValue(subExpr, range);
QualType elemType = {};
uint32_t rowCount = 0, colCount = 0, elemCount = 0;
const bool isMat =
isMxNMatrix(subExprType, &elemType, &rowCount, &colCount);
const bool isVec = isVectorType(toType, nullptr, &elemCount);
assert(isMat && rowCount == 2 && colCount == 2);
assert(isVec && elemCount == 4);
(void)isMat;
(void)isVec;
QualType vec2Type = astContext.getExtVectorType(elemType, 2);
auto *row0 =
spvBuilder.createCompositeExtract(vec2Type, mat, {0}, srcLoc, range);
auto *row1 =
spvBuilder.createCompositeExtract(vec2Type, mat, {1}, srcLoc, range);
auto *vec = spvBuilder.createVectorShuffle(toType, row0, row1, {0, 1, 2, 3},
srcLoc, range);
if (!vec)
return nullptr;
vec->setRValue();
return vec;
}
case CastKind::CK_FunctionToPointerDecay:
// Just need to return the function id
return doExpr(subExpr, range);
case CastKind::CK_FlatConversion: {
SpirvInstruction *subExprInstr = nullptr;
QualType evalType = subExprType;
// Optimization: we can use OpConstantNull for cases where we want to
// initialize an entire data structure to zeros.
if (evaluatesToConstZero(subExpr, astContext)) {
subExprInstr = spvBuilder.getConstantNull(toType);
subExprInstr->setRValue();
return subExprInstr;
}
// Try to evaluate float literals as float rather than double.
if (const auto *floatLiteral = dyn_cast<FloatingLiteral>(subExpr)) {
subExprInstr = constEvaluator.tryToEvaluateAsFloat32(
floatLiteral->getValue(), isSpecConstantMode);
if (subExprInstr)
evalType = astContext.FloatTy;
}
// Evaluate 'literal float' initializer type as float rather than double.
// TODO: This could result in rounding error if the initializer is a
// non-literal expression that requires larger than 32 bits and has the
// 'literal float' type.
else if (subExprType->isSpecificBuiltinType(BuiltinType::LitFloat)) {
evalType = astContext.FloatTy;
}
// Try to evaluate integer literals as 32-bit int rather than 64-bit int.
else if (const auto *intLiteral = dyn_cast<IntegerLiteral>(subExpr)) {
const bool isSigned = subExprType->isSignedIntegerType();
subExprInstr =
constEvaluator.tryToEvaluateAsInt32(intLiteral->getValue(), isSigned);
if (subExprInstr)
evalType = isSigned ? astContext.IntTy : astContext.UnsignedIntTy;
}
// For assigning one array instance to another one with the same array type
// (regardless of constness and literalness), the rhs will be wrapped in a
// FlatConversion. Similarly for assigning a struct to another struct with
// identical members.
// |- <lhs>
// `- ImplicitCastExpr <FlatConversion>
// `- ImplicitCastExpr <LValueToRValue>
// `- <rhs>
else if (isSameType(astContext, toType, evalType) ||
// We can have casts changing the shape but without affecting
// memory order, e.g., `float4 a[2]; float b[8] = (float[8])a;`.
// This is also represented as FlatConversion. For such cases, we
             // can rely on the InitListHandler, which can decompose
// vectors/matrices.
subExprType->isArrayType()) {
auto *valInstr =
InitListHandler(astContext, *this).processCast(toType, subExpr);
if (valInstr)
valInstr->setRValue();
return valInstr;
}
// We can have casts changing the shape but without affecting memory order,
// e.g., `float4 a[2]; float b[8] = (float[8])a;`. This is also represented
// as FlatConversion. For such cases, we can rely on the InitListHandler,
    // which can decompose vectors/matrices.
else if (subExprType->isArrayType()) {
auto *valInstr = InitListHandler(astContext, *this)
.processCast(expr->getType(), subExpr);
if (valInstr)
valInstr->setRValue();
return valInstr;
}
if (!subExprInstr)
subExprInstr = loadIfGLValue(subExpr);
if (!subExprInstr)
return nullptr;
auto *val =
processFlatConversion(toType, subExprInstr, expr->getExprLoc(), range);
val->setRValue();
return val;
}
case CastKind::CK_UncheckedDerivedToBase:
case CastKind::CK_HLSLDerivedToBase: {
// Find the index sequence of the base to which we are casting
llvm::SmallVector<uint32_t, 4> baseIndices;
getBaseClassIndices(expr, &baseIndices);
    // Turn them into SPIR-V constants
llvm::SmallVector<SpirvInstruction *, 4> baseIndexInstructions(
baseIndices.size(), nullptr);
for (uint32_t i = 0; i < baseIndices.size(); ++i)
baseIndexInstructions[i] = spvBuilder.getConstantInt(
astContext.UnsignedIntTy, llvm::APInt(32, baseIndices[i]));
auto *derivedInfo = doExpr(subExpr);
return derefOrCreatePointerToValue(subExpr->getType(), derivedInfo,
expr->getType(), baseIndexInstructions,
subExpr->getExprLoc(), range);
}
case CastKind::CK_ArrayToPointerDecay: {
// Literal string to const string conversion falls under this category.
if (hlsl::IsStringLiteralType(subExprType) && hlsl::IsStringType(toType)) {
return doExpr(subExpr, range);
} else {
emitError("implicit cast kind '%0' unimplemented", expr->getExprLoc())
<< expr->getCastKindName() << expr->getSourceRange();
expr->dump();
      return nullptr;
}
}
default:
emitError("implicit cast kind '%0' unimplemented", expr->getExprLoc())
<< expr->getCastKindName() << expr->getSourceRange();
expr->dump();
    return nullptr;
}
}
SpirvInstruction *
SpirvEmitter::processFlatConversion(const QualType type,
SpirvInstruction *initInstr,
SourceLocation srcLoc, SourceRange range) {
// If the same literal is used in multiple instructions, then the literal
// visitor may not be able to pick the correct type for the literal. That
  // happens when, say, one instruction uses the literal as a float and another
// uses it as a double. We solve this by setting the type for the literal to
// its 32-bit equivalent.
//
// TODO(6188): This is wrong when the literal is too large to be held in
  // the 32-bit type. We do this because it is consistent with the
  // long-standing behavior. Changing it now would result in more 64-bit
  // arithmetic, which the optimizer does not handle as well.
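  // For example (illustrative): if the same 'literal float' constant feeds
  // both a float-typed use and a double-typed use, pinning it to FloatTy
  // here gives the literal visitor a single consistent type to choose.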
QualType resultType = initInstr->getAstResultType();
if (resultType->isSpecificBuiltinType(BuiltinType::LitFloat)) {
initInstr->setAstResultType(astContext.FloatTy);
} else if (resultType->isSpecificBuiltinType(BuiltinType::LitInt)) {
if (resultType->isSignedIntegerType())
initInstr->setAstResultType(astContext.LongLongTy);
else
initInstr->setAstResultType(astContext.UnsignedLongLongTy);
}
// Decompose `initInstr`.
std::vector<SpirvInstruction *> flatValues = decomposeToScalars(initInstr);
if (flatValues.size() == 1) {
return splatScalarToGenerate(type, flatValues[0], SpirvLayoutRule::Void);
}
return generateFromScalars(type, flatValues, SpirvLayoutRule::Void);
}
SpirvInstruction *
SpirvEmitter::doCompoundAssignOperator(const CompoundAssignOperator *expr) {
const auto opcode = expr->getOpcode();
  // Try to optimize the floatMxN *= float and floatN *= float cases.
if (opcode == BO_MulAssign) {
if (auto *result = tryToGenFloatMatrixScale(expr))
return result;
if (auto *result = tryToGenFloatVectorScale(expr))
return result;
}
const auto *rhs = expr->getRHS();
const auto *lhs = expr->getLHS();
SpirvInstruction *lhsPtr = nullptr;
auto *result = processBinaryOp(
lhs, rhs, opcode, expr->getComputationLHSType(), expr->getType(),
expr->getSourceRange(), expr->getOperatorLoc(), &lhsPtr);
return processAssignment(lhs, result, true, lhsPtr, expr->getSourceRange());
}
SpirvInstruction *SpirvEmitter::doShortCircuitedConditionalOperator(
const ConditionalOperator *expr) {
const auto type = expr->getType();
const SourceLocation loc = expr->getExprLoc();
const SourceRange range = expr->getSourceRange();
const Expr *cond = expr->getCond();
const Expr *falseExpr = expr->getFalseExpr();
const Expr *trueExpr = expr->getTrueExpr();
// Short-circuited operators can only be used with scalar conditions. This
// is checked earlier.
assert(cond->getType()->isScalarType());
auto *tempVar = spvBuilder.addFnVar(type, loc, "temp.var.ternary");
auto *thenBB = spvBuilder.createBasicBlock("ternary.lhs");
auto *elseBB = spvBuilder.createBasicBlock("ternary.rhs");
auto *mergeBB = spvBuilder.createBasicBlock("ternary.merge");
// Create the branch instruction. This will end the current basic block.
SpirvInstruction *condition = loadIfGLValue(cond);
condition = castToBool(condition, cond->getType(), astContext.BoolTy,
cond->getLocEnd());
spvBuilder.createConditionalBranch(condition, thenBB, elseBB, loc, mergeBB);
spvBuilder.addSuccessor(thenBB);
spvBuilder.addSuccessor(elseBB);
spvBuilder.setMergeTarget(mergeBB);
// Handle the true case.
spvBuilder.setInsertPoint(thenBB);
SpirvInstruction *trueVal = loadIfGLValue(trueExpr);
trueVal = castToType(trueVal, trueExpr->getType(), type,
trueExpr->getExprLoc(), range);
if (!trueVal)
return nullptr;
spvBuilder.createStore(tempVar, trueVal, trueExpr->getLocStart(), range);
spvBuilder.createBranch(mergeBB, trueExpr->getLocEnd());
spvBuilder.addSuccessor(mergeBB);
// Handle the false case.
spvBuilder.setInsertPoint(elseBB);
SpirvInstruction *falseVal = loadIfGLValue(falseExpr);
falseVal = castToType(falseVal, falseExpr->getType(), type,
falseExpr->getExprLoc(), range);
if (!falseVal)
return nullptr;
spvBuilder.createStore(tempVar, falseVal, falseExpr->getLocStart(), range);
spvBuilder.createBranch(mergeBB, falseExpr->getLocEnd());
spvBuilder.addSuccessor(mergeBB);
// From now on, emit instructions into the merge block.
spvBuilder.setInsertPoint(mergeBB);
SpirvInstruction *result = spvBuilder.createLoad(type, tempVar, loc, range);
if (!result)
return nullptr;
result->setRValue();
return result;
}
SpirvInstruction *SpirvEmitter::doConditional(const Expr *expr,
const Expr *cond,
const Expr *falseExpr,
const Expr *trueExpr) {
const auto type = expr->getType();
const SourceLocation loc = expr->getExprLoc();
const SourceRange range = expr->getSourceRange();
// Corner-case: In HLSL, the condition of the ternary operator can be a
// matrix of booleans which results in selecting between components of two
// matrices. However, a matrix of booleans is not a valid type in SPIR-V.
// If the AST has inserted a splat of a scalar/vector to a matrix, we can just
// use that scalar/vector as an if-clause condition.
if (auto *cast = dyn_cast<ImplicitCastExpr>(cond))
if (cast->getCastKind() == CK_HLSLMatrixSplat)
cond = cast->getSubExpr();
  // If we are selecting between two SamplerState objects, none of the three
  // operands has an LValueToRValue implicit cast.
auto *condition = loadIfGLValue(cond);
auto *trueBranch = loadIfGLValue(trueExpr);
auto *falseBranch = loadIfGLValue(falseExpr);
// Corner-case: In HLSL, the condition of the ternary operator can be a
// matrix of booleans which results in selecting between components of two
// matrices. However, a matrix of booleans is not a valid type in SPIR-V.
// Therefore, we need to perform OpSelect for each row of the matrix.
{
QualType condElemType = {}, elemType = {};
uint32_t rowCount = 0, colCount = 0;
if (isMxNMatrix(type, &elemType, &rowCount, &colCount) &&
isMxNMatrix(cond->getType(), &condElemType) &&
condElemType->isBooleanType()) {
const auto rowType = astContext.getExtVectorType(elemType, colCount);
const auto condRowType =
astContext.getExtVectorType(condElemType, colCount);
llvm::SmallVector<SpirvInstruction *, 4> rows;
for (uint32_t i = 0; i < rowCount; ++i) {
auto *condRow = spvBuilder.createCompositeExtract(
condRowType, condition, {i}, loc, range);
auto *trueRow = spvBuilder.createCompositeExtract(rowType, trueBranch,
{i}, loc, range);
auto *falseRow = spvBuilder.createCompositeExtract(rowType, falseBranch,
{i}, loc, range);
rows.push_back(spvBuilder.createSelect(rowType, condRow, trueRow,
falseRow, loc, range));
}
auto *result =
spvBuilder.createCompositeConstruct(type, rows, loc, range);
if (!result)
return nullptr;
result->setRValue();
return result;
}
}
// For cases where the return type is a scalar or a vector, we can use
// OpSelect to choose between the two. OpSelect's return type must be either
// scalar or vector.
if (isScalarType(type) || isVectorType(type)) {
// The SPIR-V OpSelect instruction must have a selection argument that is
// the same size as the return type. If the return type is a vector, the
// selection must be a vector of booleans (one per output component).
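    // For example, 'c ? v1 : v2' with a scalar bool 'c' and float4 operands
    // requires splatting 'c' into a bool4 before the OpSelect below.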
uint32_t count = 0;
if (isVectorType(expr->getType(), nullptr, &count) &&
!isVectorType(cond->getType())) {
const llvm::SmallVector<SpirvInstruction *, 4> components(size_t(count),
condition);
condition = spvBuilder.createCompositeConstruct(
astContext.getExtVectorType(astContext.BoolTy, count), components,
cond->getLocEnd());
}
auto *value = spvBuilder.createSelect(type, condition, trueBranch,
falseBranch, loc, range);
if (!value)
return nullptr;
value->setRValue();
return value;
}
// Usually integer conditional types in HLSL will be wrapped in an
// ImplicitCastExpr<IntegralToBoolean> in the Clang AST. However, some
// combinations of result types can result in a bare integer (literal or
// reference) as a condition, which still needs to be cast to bool.
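  // For example (illustrative): '1 ? a : b' can reach here with a bare
  // integer literal condition and no IntegralToBoolean cast in the AST.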
if (cond->getType()->isIntegerType()) {
condition =
castToBool(condition, cond->getType(), astContext.BoolTy, loc, range);
}
// If we can't use OpSelect, we need to create if-else control flow.
auto *tempVar = spvBuilder.addFnVar(type, loc, "temp.var.ternary");
auto *thenBB = spvBuilder.createBasicBlock("if.true");
auto *mergeBB = spvBuilder.createBasicBlock("if.merge");
auto *elseBB = spvBuilder.createBasicBlock("if.false");
// Create the branch instruction. This will end the current basic block.
spvBuilder.createConditionalBranch(condition, thenBB, elseBB,
cond->getLocEnd(), mergeBB);
spvBuilder.addSuccessor(thenBB);
spvBuilder.addSuccessor(elseBB);
spvBuilder.setMergeTarget(mergeBB);
// Handle the then branch
spvBuilder.setInsertPoint(thenBB);
spvBuilder.createStore(tempVar, trueBranch, trueExpr->getLocStart(), range);
spvBuilder.createBranch(mergeBB, trueExpr->getLocEnd());
spvBuilder.addSuccessor(mergeBB);
// Handle the else branch
spvBuilder.setInsertPoint(elseBB);
spvBuilder.createStore(tempVar, falseBranch, falseExpr->getLocStart(), range);
spvBuilder.createBranch(mergeBB, falseExpr->getLocEnd());
spvBuilder.addSuccessor(mergeBB);
// From now on, emit instructions into the merge block.
spvBuilder.setInsertPoint(mergeBB);
auto *result = spvBuilder.createLoad(type, tempVar, expr->getLocEnd(), range);
if (!result)
return nullptr;
result->setRValue();
return result;
}
SpirvInstruction *
SpirvEmitter::processByteAddressBufferStructuredBufferGetDimensions(
const CXXMemberCallExpr *expr) {
const auto range = expr->getSourceRange();
const auto *object = expr->getImplicitObjectArgument();
auto *objectInstr = loadIfAliasVarRef(object);
const auto type = object->getType();
const bool isBABuf = isByteAddressBuffer(type) || isRWByteAddressBuffer(type);
const bool isStructuredBuf = isStructuredBuffer(type) ||
isAppendStructuredBuffer(type) ||
isConsumeStructuredBuffer(type);
assert(isBABuf || isStructuredBuf);
// (RW)ByteAddressBuffers/(RW)StructuredBuffers are represented as a structure
// with only one member that is a runtime array. We need to perform
// OpArrayLength on member 0.
SpirvInstruction *length = spvBuilder.createArrayLength(
astContext.UnsignedIntTy, expr->getExprLoc(), objectInstr, 0, range);
// For (RW)ByteAddressBuffers, GetDimensions() must return the array length
// in bytes, but OpArrayLength returns the number of uints in the runtime
  // array. Therefore we must multiply the result by 4.
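  // For example, a 16-byte buffer is a runtime array of 4 uints:
  // OpArrayLength yields 4, and GetDimensions must report 4 * 4 = 16.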
if (isBABuf) {
length = spvBuilder.createBinaryOp(
spv::Op::OpIMul, astContext.UnsignedIntTy, length,
        // TODO(jaebaek): What line info should we emit for constants?
spvBuilder.getConstantInt(astContext.UnsignedIntTy,
llvm::APInt(32, 4u)),
expr->getExprLoc(), range);
}
spvBuilder.createStore(doExpr(expr->getArg(0)), length,
expr->getArg(0)->getLocStart(), range);
if (isStructuredBuf) {
// For (RW)StructuredBuffer, the stride of the runtime array (which is the
// size of the struct) must also be written to the second argument.
AlignmentSizeCalculator alignmentCalc(astContext, spirvOptions);
uint32_t size = 0, stride = 0;
std::tie(std::ignore, size) =
alignmentCalc.getAlignmentAndSize(type, spirvOptions.sBufferLayoutRule,
/*isRowMajor*/ llvm::None, &stride);
auto *sizeInstr = spvBuilder.getConstantInt(astContext.UnsignedIntTy,
llvm::APInt(32, size));
spvBuilder.createStore(doExpr(expr->getArg(1)), sizeInstr,
expr->getArg(1)->getLocStart(), range);
}
return nullptr;
}
SpirvInstruction *SpirvEmitter::processRWByteAddressBufferAtomicMethods(
hlsl::IntrinsicOp opcode, const CXXMemberCallExpr *expr) {
  // The signatures of RWByteAddressBuffer atomic methods are largely:
// void Interlocked*(in UINT dest, in UINT value);
// void Interlocked*(in UINT dest, in UINT value, out UINT original_value);
const auto *object = expr->getImplicitObjectArgument();
auto *objectInfo = loadIfAliasVarRef(object);
auto *zero =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 0));
auto *offset = doExpr(expr->getArg(0));
  // Right shift by 2 to convert the byte offset to a uint32_t element offset
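  // e.g., byte offset 8 maps to element index 2 (8 >> 2), since the buffer
  // is viewed as an array of 4-byte uints.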
const auto range = expr->getSourceRange();
auto *address = spvBuilder.createBinaryOp(
spv::Op::OpShiftRightLogical, astContext.UnsignedIntTy, offset,
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 2)),
expr->getExprLoc(), range);
auto *ptr = spvBuilder.createAccessChain(astContext.UnsignedIntTy, objectInfo,
{zero, address},
object->getLocStart(), range);
const bool isCompareExchange =
opcode == hlsl::IntrinsicOp::MOP_InterlockedCompareExchange;
const bool isCompareStore =
opcode == hlsl::IntrinsicOp::MOP_InterlockedCompareStore;
if (isCompareExchange || isCompareStore) {
auto *comparator = doExpr(expr->getArg(1));
SpirvInstruction *originalVal = spvBuilder.createAtomicCompareExchange(
astContext.UnsignedIntTy, ptr, spv::Scope::Device,
spv::MemorySemanticsMask::MaskNone, spv::MemorySemanticsMask::MaskNone,
doExpr(expr->getArg(2)), comparator, expr->getCallee()->getExprLoc(),
range);
if (isCompareExchange) {
auto *resultAddress = expr->getArg(3);
QualType resultType = resultAddress->getType();
if (resultType != astContext.UnsignedIntTy)
originalVal = castToInt(originalVal, astContext.UnsignedIntTy,
resultType, expr->getArg(3)->getLocStart());
spvBuilder.createStore(doExpr(expr->getArg(3)), originalVal,
expr->getArg(3)->getLocStart(), range);
}
} else {
const Expr *value = expr->getArg(1);
SpirvInstruction *valueInstr = doExpr(expr->getArg(1));
    // Since an RWByteAddressBuffer is represented by an array of 32-bit
    // unsigned integers, the destination pointee type will always be
    // unsigned, and thus the SPIR-V instruction's result type and value type
    // must also be unsigned. The signedness of the opcode is determined
    // correctly by the frontend and will correctly determine the signedness
    // of the actual operation, but the necessary argument type cast will not
    // be added by the frontend in the case of a signed value.
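    // For example (illustrative): 'int v; buf.InterlockedMax(0, v, orig);'
    // may resolve to a signed max, but 'v' must still be cast to uint here
    // to match the uint-typed destination pointer.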
valueInstr =
castToType(valueInstr, value->getType(), astContext.UnsignedIntTy,
value->getExprLoc(), range);
SpirvInstruction *originalVal = spvBuilder.createAtomicOp(
translateAtomicHlslOpcodeToSpirvOpcode(opcode),
astContext.UnsignedIntTy, ptr, spv::Scope::Device,
spv::MemorySemanticsMask::MaskNone, valueInstr,
expr->getCallee()->getExprLoc(), range);
if (expr->getNumArgs() > 2) {
originalVal = castToType(originalVal, astContext.UnsignedIntTy,
expr->getArg(2)->getType(),
expr->getArg(2)->getLocStart(), range);
spvBuilder.createStore(doExpr(expr->getArg(2)), originalVal,
expr->getArg(2)->getLocStart(), range);
}
}
return nullptr;
}
SpirvInstruction *
SpirvEmitter::processGetSamplePosition(const CXXMemberCallExpr *expr) {
const auto *object = expr->getImplicitObjectArgument()->IgnoreParens();
auto *sampleCount = spvBuilder.createImageQuery(
spv::Op::OpImageQuerySamples, astContext.UnsignedIntTy,
expr->getExprLoc(), loadIfGLValue(object));
if (!spirvOptions.noWarnEmulatedFeatures)
emitWarning("GetSamplePosition is emulated using many SPIR-V instructions "
"due to lack of direct SPIR-V equivalent, so it only supports "
"standard sample settings with 1, 2, 4, 8, or 16 samples and "
"will return float2(0, 0) for other cases",
expr->getCallee()->getExprLoc());
return emitGetSamplePosition(sampleCount, doExpr(expr->getArg(0)),
expr->getCallee()->getExprLoc());
}
SpirvInstruction *
SpirvEmitter::processSubpassLoad(const CXXMemberCallExpr *expr) {
if (!spvContext.isPS()) {
emitError("SubpassInput(MS) only allowed in pixel shader",
expr->getExprLoc());
return nullptr;
}
const auto *object = expr->getImplicitObjectArgument()->IgnoreParens();
SpirvInstruction *sample =
expr->getNumArgs() == 1 ? doExpr(expr->getArg(0)) : nullptr;
auto *zero = spvBuilder.getConstantInt(astContext.IntTy, llvm::APInt(32, 0));
auto *location = spvBuilder.getConstantComposite(
astContext.getExtVectorType(astContext.IntTy, 2), {zero, zero});
return processBufferTextureLoad(object, location, /*constOffset*/ nullptr,
/*lod*/ sample,
/*residencyCode*/ nullptr, expr->getExprLoc());
}
SpirvInstruction *
SpirvEmitter::processBufferTextureGetDimensions(const CXXMemberCallExpr *expr) {
const auto *object = expr->getImplicitObjectArgument();
const auto range = expr->getSourceRange();
auto *objectInstr = loadIfGLValue(object, range);
const auto type = object->getType();
const auto *recType = type->getAs<RecordType>();
assert(recType);
const auto typeName = recType->getDecl()->getName();
const auto numArgs = expr->getNumArgs();
const Expr *mipLevel = nullptr, *numLevels = nullptr, *numSamples = nullptr;
assert(isTexture(type) || isRWTexture(type) || isBuffer(type) ||
isRWBuffer(type));
// For Texture1D, arguments are either:
// a) width
// b) MipLevel, width, NumLevels
// For Texture1DArray, arguments are either:
// a) width, elements
// b) MipLevel, width, elements, NumLevels
// For Texture2D, arguments are either:
// a) width, height
// b) MipLevel, width, height, NumLevels
// For Texture2DArray, arguments are either:
// a) width, height, elements
// b) MipLevel, width, height, elements, NumLevels
// For Texture3D, arguments are either:
// a) width, height, depth
// b) MipLevel, width, height, depth, NumLevels
// For Texture2DMS, arguments are: width, height, NumSamples
// For Texture2DMSArray, arguments are: width, height, elements, NumSamples
// For TextureCube, arguments are either:
// a) width, height
// b) MipLevel, width, height, NumLevels
// For TextureCubeArray, arguments are either:
// a) width, height, elements
// b) MipLevel, width, height, elements, NumLevels
// Note: SPIR-V Spec requires return type of OpImageQuerySize(Lod) to be a
// scalar/vector of integers. SPIR-V Spec also requires return type of
// OpImageQueryLevels and OpImageQuerySamples to be scalar integers.
// The HLSL methods, however, have overloaded functions which have float
// output arguments. Since the AST naturally won't have casting AST nodes for
// such cases, we'll have to perform the cast ourselves.
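// For example (illustrative HLSL, not from the original source):
//   uint w, h, levels;
//   tex2D.GetDimensions(0, w, h, levels);
// takes the mipLevel/numLevels form: width and height come from
// OpImageQuerySizeLod, and levels from OpImageQueryLevels.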
const auto storeToOutputArg = [range, this](const Expr *outputArg,
SpirvInstruction *id,
QualType type) {
id = castToType(id, type, outputArg->getType(), outputArg->getExprLoc(),
range);
spvBuilder.createStore(doExpr(outputArg, range), id,
outputArg->getLocStart(), range);
};
if ((typeName == "Texture1D" && numArgs > 1) ||
(typeName == "Texture2D" && numArgs > 2) ||
(typeName == "TextureCube" && numArgs > 2) ||
(typeName == "Texture3D" && numArgs > 3) ||
(typeName == "Texture1DArray" && numArgs > 2) ||
(typeName == "TextureCubeArray" && numArgs > 3) ||
(typeName == "Texture2DArray" && numArgs > 3)) {
mipLevel = expr->getArg(0);
numLevels = expr->getArg(numArgs - 1);
}
if (isTextureMS(type)) {
numSamples = expr->getArg(numArgs - 1);
}
// Make sure that all output args are l-values.
for (uint32_t argIdx = (mipLevel ? 1 : 0); argIdx < numArgs; ++argIdx) {
if (!expr->getArg(argIdx)->isLValue()) {
emitError("Output argument must be an l-value",
expr->getArg(argIdx)->getExprLoc());
return nullptr;
}
}
uint32_t querySize = numArgs;
// If numLevels arg is present, mipLevel must also be present. These are not
// queried via ImageQuerySizeLod.
if (numLevels)
querySize -= 2;
// The numSamples arg is queried separately via OpImageQuerySamples.
else if (numSamples)
querySize -= 1;
const QualType resultQualType =
querySize == 1
? astContext.UnsignedIntTy
: astContext.getExtVectorType(astContext.UnsignedIntTy, querySize);
// Only Texture types use ImageQuerySizeLod.
// TextureMS, RWTexture, Buffers, RWBuffers use ImageQuerySize.
SpirvInstruction *lod = nullptr;
if (isTexture(type) && !numSamples) {
if (mipLevel) {
// For Texture types when mipLevel argument is present.
lod = doExpr(mipLevel, range);
} else {
// For Texture types when mipLevel argument is omitted.
lod = spvBuilder.getConstantInt(astContext.IntTy, llvm::APInt(32, 0));
}
}
SpirvInstruction *query =
lod ? cast<SpirvInstruction>(spvBuilder.createImageQuery(
spv::Op::OpImageQuerySizeLod, resultQualType,
expr->getCallee()->getExprLoc(), objectInstr, lod, range))
: cast<SpirvInstruction>(spvBuilder.createImageQuery(
spv::Op::OpImageQuerySize, resultQualType,
expr->getCallee()->getExprLoc(), objectInstr, nullptr, range));
if (querySize == 1) {
const uint32_t argIndex = mipLevel ? 1 : 0;
storeToOutputArg(expr->getArg(argIndex), query, resultQualType);
} else {
for (uint32_t i = 0; i < querySize; ++i) {
const uint32_t argIndex = mipLevel ? i + 1 : i;
auto *component = spvBuilder.createCompositeExtract(
astContext.UnsignedIntTy, query, {i}, expr->getCallee()->getExprLoc(),
range);
// If the first arg is the mipmap level, we must write the results
// starting from Arg(i+1), not Arg(i).
storeToOutputArg(expr->getArg(argIndex), component,
astContext.UnsignedIntTy);
}
}
if (numLevels || numSamples) {
const Expr *numLevelsSamplesArg = numLevels ? numLevels : numSamples;
const spv::Op opcode =
numLevels ? spv::Op::OpImageQueryLevels : spv::Op::OpImageQuerySamples;
auto *numLevelsSamplesQuery = spvBuilder.createImageQuery(
opcode, astContext.UnsignedIntTy, expr->getCallee()->getExprLoc(),
objectInstr);
storeToOutputArg(numLevelsSamplesArg, numLevelsSamplesQuery,
astContext.UnsignedIntTy);
}
return nullptr;
}
SpirvInstruction *
SpirvEmitter::processTextureLevelOfDetail(const CXXMemberCallExpr *expr,
bool unclamped) {
// Possible signatures are as follows:
// Texture1D(Array).CalculateLevelOfDetail(SamplerState S, float x);
// Texture2D(Array).CalculateLevelOfDetail(SamplerState S, float2 xy);
// TextureCube(Array).CalculateLevelOfDetail(SamplerState S, float3 xyz);
// Texture3D.CalculateLevelOfDetail(SamplerState S, float3 xyz);
// Return type is always a single float (LOD).
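// For example (illustrative, not from the original source):
//   float lod = tex2D.CalculateLevelOfDetail(samp, uv);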
assert(expr->getNumArgs() == 2u);
const auto *object = expr->getImplicitObjectArgument();
auto *objectInfo = loadIfGLValue(object);
auto *samplerState = doExpr(expr->getArg(0));
auto *coordinate = doExpr(expr->getArg(1));
auto *sampledImage = spvBuilder.createSampledImage(
object->getType(), objectInfo, samplerState, expr->getExprLoc());
// The result type of OpImageQueryLod must be a float2.
const QualType queryResultType =
astContext.getExtVectorType(astContext.FloatTy, 2u);
auto *query =
spvBuilder.createImageQuery(spv::Op::OpImageQueryLod, queryResultType,
expr->getExprLoc(), sampledImage, coordinate);
if (spvContext.isCS()) {
addDerivativeGroupExecutionMode();
}
// The first component of the float2 is the clamped LOD (the mipmap level
// that would be accessed). The second component is the unclamped LOD.
return spvBuilder.createCompositeExtract(astContext.FloatTy, query,
unclamped ? 1 : 0,
expr->getCallee()->getExprLoc());
}
SpirvInstruction *SpirvEmitter::processTextureGatherRGBACmpRGBA(
const CXXMemberCallExpr *expr, const bool isCmp, const uint32_t component) {
// Parameters for .Gather{Red|Green|Blue|Alpha}() are one of the following
// two sets:
// * SamplerState s, float2 location, int2 offset
// * SamplerState s, float2 location, int2 offset0, int2 offset1,
//     int2 offset2, int2 offset3
//
// An additional 'out uint status' parameter can appear in both of the above.
//
// Parameters for .GatherCmp{Red|Green|Blue|Alpha}() are one of the following
// two sets:
// * SamplerState s, float2 location, float compare_value, int2 offset
// * SamplerState s, float2 location, float compare_value, int2 offset1,
// int2 offset2, int2 offset3, int2 offset4
//
// An additional 'out uint status' parameter can appear in both of the above.
//
// TextureCube's signature is somewhat different from the rest.
// Parameters for .Gather{Red|Green|Blue|Alpha}() for TextureCube are:
// * SamplerState s, float2 location, out uint status
// Parameters for .GatherCmp{Red|Green|Blue|Alpha}() for TextureCube are:
// * SamplerState s, float2 location, float compare_value, out uint status
//
// Return type is always a 4-component vector.
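// For example (illustrative, not from the original source):
//   float4 reds = tex2D.GatherRed(samp, uv, int2(1, 1));
// returns the red components of the four texels that would be sampled for
// bilinear filtering at uv.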
const FunctionDecl *callee = expr->getDirectCallee();
const auto numArgs = expr->getNumArgs();
const auto *imageExpr = expr->getImplicitObjectArgument();
const auto loc = expr->getCallee()->getExprLoc();
const QualType imageType = imageExpr->getType();
const QualType retType = callee->getReturnType();
// If the last arg is an unsigned integer, it must be the status.
const bool hasStatusArg =
expr->getArg(numArgs - 1)->getType()->isUnsignedIntegerType();
// Subtract 1 for status arg (if it exists), subtract 1 for compare_value (if
// it exists), and subtract 2 for SamplerState and location.
const auto numOffsetArgs = numArgs - hasStatusArg - isCmp - 2;
// No offset args for TextureCube, 1 or 4 offset args for the rest.
assert(numOffsetArgs == 0 || numOffsetArgs == 1 || numOffsetArgs == 4);
auto *image = loadIfGLValue(imageExpr);
auto *sampler = doExpr(expr->getArg(0));
auto *coordinate = doExpr(expr->getArg(1));
auto *compareVal = isCmp ? doExpr(expr->getArg(2)) : nullptr;
// Handle offsets (if any).
bool needsEmulation = false;
SpirvInstruction *constOffset = nullptr, *varOffset = nullptr,
*constOffsets = nullptr;
if (numOffsetArgs == 1) {
// The offset arg is not optional.
handleOffsetInMethodCall(expr, 2 + isCmp, &constOffset, &varOffset);
} else if (numOffsetArgs == 4) {
auto *offset0 = constEvaluator.tryToEvaluateAsConst(expr->getArg(2 + isCmp),
isSpecConstantMode);
auto *offset1 = constEvaluator.tryToEvaluateAsConst(expr->getArg(3 + isCmp),
isSpecConstantMode);
auto *offset2 = constEvaluator.tryToEvaluateAsConst(expr->getArg(4 + isCmp),
isSpecConstantMode);
auto *offset3 = constEvaluator.tryToEvaluateAsConst(expr->getArg(5 + isCmp),
isSpecConstantMode);
// If any of the offsets is not constant, we then need to emulate the call
// using 4 OpImageGather instructions. Otherwise, we can leverage the
// ConstOffsets image operand.
if (offset0 && offset1 && offset2 && offset3) {
const QualType v2i32 = astContext.getExtVectorType(astContext.IntTy, 2);
const auto offsetType = astContext.getConstantArrayType(
v2i32, llvm::APInt(32, 4), clang::ArrayType::Normal, 0);
constOffsets = spvBuilder.getConstantComposite(
offsetType, {offset0, offset1, offset2, offset3});
} else {
needsEmulation = true;
}
}
auto *status = hasStatusArg ? doExpr(expr->getArg(numArgs - 1)) : nullptr;
if (needsEmulation) {
const auto elemType = hlsl::GetHLSLVecElementType(callee->getReturnType());
SpirvInstruction *texels[4];
for (uint32_t i = 0; i < 4; ++i) {
varOffset = doExpr(expr->getArg(2 + isCmp + i));
auto *gatherRet = spvBuilder.createImageGather(
retType, imageType, image, sampler, coordinate,
spvBuilder.getConstantInt(astContext.IntTy,
llvm::APInt(32, component, true)),
compareVal,
/*constOffset*/ nullptr, varOffset, /*constOffsets*/ nullptr,
/*sampleNumber*/ nullptr, status, loc);
texels[i] =
spvBuilder.createCompositeExtract(elemType, gatherRet, {i}, loc);
}
return spvBuilder.createCompositeConstruct(
retType, {texels[0], texels[1], texels[2], texels[3]}, loc);
}
return spvBuilder.createImageGather(
retType, imageType, image, sampler, coordinate,
spvBuilder.getConstantInt(astContext.IntTy,
llvm::APInt(32, component, true)),
compareVal, constOffset, varOffset, constOffsets,
/*sampleNumber*/ nullptr, status, loc);
}
SpirvInstruction *
SpirvEmitter::processTextureGatherCmp(const CXXMemberCallExpr *expr) {
// Signature for Texture2D/Texture2DArray:
//
// float4 GatherCmp(
// in SamplerComparisonState s,
// in float2 location,
// in float compare_value
// [,in int2 offset]
// [,out uint Status]
// );
//
// Signature for TextureCube/TextureCubeArray:
//
// float4 GatherCmp(
// in SamplerComparisonState s,
// in float2 location,
// in float compare_value,
// out uint Status
// );
//
// Other Texture types do not have the GatherCmp method.
const FunctionDecl *callee = expr->getDirectCallee();
const auto numArgs = expr->getNumArgs();
const auto loc = expr->getExprLoc();
const bool hasStatusArg =
expr->getArg(numArgs - 1)->getType()->isUnsignedIntegerType();
const bool hasOffsetArg = (numArgs == 5) || (numArgs == 4 && !hasStatusArg);
const auto *imageExpr = expr->getImplicitObjectArgument();
auto *image = loadIfGLValue(imageExpr);
auto *sampler = doExpr(expr->getArg(0));
auto *coordinate = doExpr(expr->getArg(1));
auto *comparator = doExpr(expr->getArg(2));
SpirvInstruction *constOffset = nullptr, *varOffset = nullptr;
if (hasOffsetArg)
handleOffsetInMethodCall(expr, 3, &constOffset, &varOffset);
const auto retType = callee->getReturnType();
const auto imageType = imageExpr->getType();
const auto status =
hasStatusArg ? doExpr(expr->getArg(numArgs - 1)) : nullptr;
return spvBuilder.createImageGather(
retType, imageType, image, sampler, coordinate,
/*component*/ nullptr, comparator, constOffset, varOffset,
/*constOffsets*/ nullptr,
/*sampleNumber*/ nullptr, status, loc);
}
SpirvInstruction *SpirvEmitter::processBufferTextureLoad(
const Expr *object, SpirvInstruction *location,
SpirvInstruction *constOffset, SpirvInstruction *lod,
SpirvInstruction *residencyCode, SourceLocation loc, SourceRange range) {
// Loading from a Buffer or Texture translates to an OpImageFetch; loading
// from an RW resource or subpass input translates to an OpImageRead. The
// result type of both must be a vec4 of float or int.
const auto type = object->getType();
assert(isBuffer(type) || isRWBuffer(type) || isTexture(type) ||
isRWTexture(type) || isSubpassInput(type) || isSubpassInputMS(type));
const bool doFetch = isBuffer(type) || isTexture(type);
const bool rasterizerOrdered = isRasterizerOrderedView(type);
if (rasterizerOrdered) {
beginInvocationInterlock(loc, range);
}
auto *objectInfo = loadIfGLValue(object, range);
// For Texture2DMS and Texture2DMSArray, Sample must be used rather than Lod.
SpirvInstruction *sampleNumber = nullptr;
if (isTextureMS(type) || isSubpassInputMS(type)) {
sampleNumber = lod;
lod = nullptr;
}
const auto sampledType = hlsl::GetHLSLResourceResultType(type);
QualType elemType = sampledType;
uint32_t elemCount = 1;
bool isTemplateOverStruct = false;
bool isTemplateTypeBool = false;
// Check whether the template type is a vector type or struct type.
if (!isVectorType(sampledType, &elemType, &elemCount)) {
if (sampledType->getAsStructureType()) {
isTemplateOverStruct = true;
// For a struct type, we need to make sure it can fit into a 4-component
// vector. Detailed failure reasons will be emitted by that function, so
// we don't need to emit errors here.
if (!canFitIntoOneRegister(astContext, sampledType, &elemType,
&elemCount))
return nullptr;
}
}
// Check whether template type is bool.
if (elemType->isBooleanType()) {
isTemplateTypeBool = true;
// Replace with unsigned int, and cast back to bool later.
elemType = astContext.UnsignedIntTy;
}
{
// Treat a vector of size 1 the same as a scalar.
if (hlsl::IsHLSLVecType(elemType) && hlsl::GetHLSLVecSize(elemType) == 1)
elemType = hlsl::GetHLSLVecElementType(elemType);
if (!elemType->isFloatingType() && !elemType->isIntegerType()) {
emitError("loading %0 value unsupported", object->getExprLoc()) << type;
return nullptr;
}
}
// If residencyCode is nullptr, we are dealing with a Load method with 2
// arguments which does not return the operation status.
if (residencyCode && residencyCode->isRValue()) {
emitError(
"an lvalue argument should be used for returning the operation status",
loc);
return nullptr;
}
// OpImageFetch and OpImageRead can only fetch a vector of 4 elements.
const QualType texelType = astContext.getExtVectorType(elemType, 4u);
auto *texel = spvBuilder.createImageFetchOrRead(
doFetch, texelType, type, objectInfo, location, lod, constOffset,
/*constOffsets*/ nullptr, sampleNumber, residencyCode, loc, range);
if (rasterizerOrdered) {
spvBuilder.createEndInvocationInterlockEXT(loc, range);
}
// If the result type is a vec1, vec2, or vec3, some extra processing
// (extraction) is required.
auto *retVal = extractVecFromVec4(texel, elemCount, elemType, loc, range);
if (isTemplateOverStruct) {
// Convert to the struct so that we are consistent with types in the AST.
retVal = convertVectorToStruct(sampledType, elemType, retVal, loc, range);
}
// If the result type is a bool, after loading the uint, convert it to
// boolean.
if (isTemplateTypeBool) {
const QualType toType =
elemCount > 1 ? astContext.getExtVectorType(elemType, elemCount)
: elemType;
retVal = castToBool(retVal, toType, sampledType, loc);
}
if (!retVal)
return nullptr;
retVal->setRValue();
return retVal;
}
SpirvInstruction *SpirvEmitter::processByteAddressBufferLoadStore(
const CXXMemberCallExpr *expr, uint32_t numWords, bool doStore) {
SpirvInstruction *result = nullptr;
const auto object = expr->getImplicitObjectArgument();
auto *objectInfo = loadIfAliasVarRef(object);
assert(numWords >= 1 && numWords <= 4);
if (doStore) {
assert(isRWByteAddressBuffer(object->getType()));
assert(expr->getNumArgs() == 2);
} else {
assert(isRWByteAddressBuffer(object->getType()) ||
isByteAddressBuffer(object->getType()));
if (expr->getNumArgs() == 2) {
emitError(
"(RW)ByteAddressBuffer::Load(in address, out status) not supported",
expr->getExprLoc());
return nullptr;
}
}
const Expr *addressExpr = expr->getArg(0);
auto *byteAddress = doExpr(addressExpr);
const QualType addressType = addressExpr->getType();
// The front-end prevents usage of templated Load2, Load3, Load4, Store2,
// Store3, Store4 intrinsic functions.
const bool isTemplatedLoadOrStore =
(numWords == 1) &&
(doStore ? !expr->getArg(1)->getType()->isSpecificBuiltinType(
BuiltinType::UInt)
: !expr->getType()->isSpecificBuiltinType(BuiltinType::UInt));
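// For example (illustrative): 'float2 v = buf.Load<float2>(addr);' takes the
// templated path below, while 'uint v = buf.Load(addr);' does not.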
const auto range = expr->getSourceRange();
const bool rasterizerOrder = isRasterizerOrderedView(object->getType());
if (isTemplatedLoadOrStore) {
// Templated load or store. We may need to perform additional
// loads/casts/composite-constructs.
if (rasterizerOrder) {
beginInvocationInterlock(expr->getLocStart(), range);
}
if (doStore) {
auto *values = doExpr(expr->getArg(1));
RawBufferHandler(*this).processTemplatedStoreToBuffer(
values, objectInfo, byteAddress, expr->getArg(1)->getType(), range);
result = nullptr;
} else {
RawBufferHandler rawBufferHandler(*this);
result = rawBufferHandler.processTemplatedLoadFromBuffer(
objectInfo, byteAddress, expr->getType(), range);
}
if (rasterizerOrder) {
spvBuilder.createEndInvocationInterlockEXT(expr->getLocStart(), range);
}
return result;
}
// Do an OpShiftRightLogical by 2 (divide by 4) to turn the byte address
// into a 32-bit word index. The AST always casts the address to unsigned
// integer, so shift by unsigned integer 2.
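// For example, byte address 16 becomes word index 4 (16 >> 2).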
auto *constUint2 =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 2));
SpirvInstruction *address = spvBuilder.createBinaryOp(
spv::Op::OpShiftRightLogical, addressType, byteAddress, constUint2,
expr->getExprLoc(), range);
// We might be able to reduce duplication by handling this with
// processTemplatedLoadFromBuffer.
if (rasterizerOrder) {
beginInvocationInterlock(expr->getLocStart(), range);
}
// Perform access chain into the RWByteAddressBuffer.
// First index must be zero (member 0 of the struct is a
// runtimeArray). The second index passed to OpAccessChain should be
// the address.
auto *constUint0 =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 0));
if (doStore) {
auto *values = doExpr(expr->getArg(1));
auto *curStoreAddress = address;
for (uint32_t wordCounter = 0; wordCounter < numWords; ++wordCounter) {
// Extract a 32-bit word from the input.
auto *curValue = numWords == 1
? values
: spvBuilder.createCompositeExtract(
astContext.UnsignedIntTy, values,
{wordCounter}, expr->getArg(1)->getExprLoc(),
expr->getArg(1)->getSourceRange());
// Update the output address if necessary.
if (wordCounter > 0) {
auto *offset = spvBuilder.getConstantInt(astContext.UnsignedIntTy,
llvm::APInt(32, wordCounter));
curStoreAddress = spvBuilder.createBinaryOp(
spv::Op::OpIAdd, addressType, address, offset,
expr->getCallee()->getExprLoc(), range);
}
// Store the word to the right address at the output.
auto *storePtr = spvBuilder.createAccessChain(
astContext.UnsignedIntTy, objectInfo, {constUint0, curStoreAddress},
object->getLocStart(), range);
spvBuilder.createStore(storePtr, curValue,
expr->getCallee()->getExprLoc(), range);
}
} else {
auto *loadPtr = spvBuilder.createAccessChain(
astContext.UnsignedIntTy, objectInfo, {constUint0, address},
object->getLocStart(), range);
result = spvBuilder.createLoad(astContext.UnsignedIntTy, loadPtr,
expr->getCallee()->getExprLoc(), range);
if (numWords > 1) {
// Load word 2, 3, and 4 where necessary. Use OpCompositeConstruct to
// return a vector result.
llvm::SmallVector<SpirvInstruction *, 4> values;
values.push_back(result);
for (uint32_t wordCounter = 2; wordCounter <= numWords; ++wordCounter) {
auto *offset = spvBuilder.getConstantInt(
astContext.UnsignedIntTy, llvm::APInt(32, wordCounter - 1));
auto *newAddress = spvBuilder.createBinaryOp(
spv::Op::OpIAdd, addressType, address, offset,
expr->getCallee()->getExprLoc(), range);
loadPtr = spvBuilder.createAccessChain(
astContext.UnsignedIntTy, objectInfo, {constUint0, newAddress},
object->getLocStart(), range);
values.push_back(
spvBuilder.createLoad(astContext.UnsignedIntTy, loadPtr,
expr->getCallee()->getExprLoc(), range));
}
const QualType resultType =
astContext.getExtVectorType(addressType, numWords);
result = spvBuilder.createCompositeConstruct(resultType, values,
expr->getLocStart(), range);
if (result)
result->setRValue();
}
}
if (rasterizerOrder) {
spvBuilder.createEndInvocationInterlockEXT(expr->getLocStart(), range);
}
return result;
}
SpirvInstruction *
SpirvEmitter::processStructuredBufferLoad(const CXXMemberCallExpr *expr) {
if (expr->getNumArgs() == 2) {
emitError(
"(RW)StructuredBuffer::Load(in location, out status) not supported",
expr->getExprLoc());
return nullptr;
}
const auto *buffer = expr->getImplicitObjectArgument();
const auto range = expr->getSourceRange();
auto *info = loadIfAliasVarRef(buffer, range);
const QualType structType =
hlsl::GetHLSLResourceResultType(buffer->getType());
auto *zero = spvBuilder.getConstantInt(astContext.IntTy, llvm::APInt(32, 0));
auto *index = doExpr(expr->getArg(0));
return derefOrCreatePointerToValue(buffer->getType(), info, structType,
{zero, index}, buffer->getExprLoc(),
range);
}
SpirvInstruction *
SpirvEmitter::incDecRWACSBufferCounter(const CXXMemberCallExpr *expr,
bool isInc, bool loadObject) {
auto *zero =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 0));
auto *sOne =
spvBuilder.getConstantInt(astContext.IntTy, llvm::APInt(32, 1, true));
const auto srcLoc = expr->getCallee()->getExprLoc();
const auto srcRange = expr->getSourceRange();
const auto *object =
expr->getImplicitObjectArgument()->IgnoreParenNoopCasts(astContext);
if (loadObject) {
// We don't need the object's <result-id> here since counter variable is a
// separate variable. But we still need the side effects of evaluating the
// object, e.g., if the source code is foo(...).IncrementCounter(), we still
// want to emit the code for foo(...).
(void)doExpr(object);
}
auto *counter = getFinalACSBufferCounterInstruction(object);
if (!counter) {
emitFatalError("cannot find the associated counter variable",
object->getExprLoc());
return nullptr;
}
// Add an extra 0 because the counter is wrapped in a struct.
auto *counterPtr = spvBuilder.createAccessChain(astContext.IntTy, counter,
{zero}, srcLoc, srcRange);
SpirvInstruction *index = nullptr;
if (isInc) {
index = spvBuilder.createAtomicOp(
spv::Op::OpAtomicIAdd, astContext.IntTy, counterPtr, spv::Scope::Device,
spv::MemorySemanticsMask::MaskNone, sOne, srcLoc, srcRange);
} else {
// Note that OpAtomicISub returns the value before the subtraction;
// so we need to do the subtraction again on OpAtomicISub's return value.
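// For example, consuming when the counter is 5 makes OpAtomicISub return 5,
// and the element index to consume is 5 - 1 == 4.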
auto *prev = spvBuilder.createAtomicOp(
spv::Op::OpAtomicISub, astContext.IntTy, counterPtr, spv::Scope::Device,
spv::MemorySemanticsMask::MaskNone, sOne, srcLoc, srcRange);
index = spvBuilder.createBinaryOp(spv::Op::OpISub, astContext.IntTy, prev,
sOne, srcLoc, srcRange);
}
return index;
}
bool SpirvEmitter::tryToAssignCounterVar(const DeclaratorDecl *dstDecl,
const Expr *srcExpr) {
// We are handling associated counters here. Casts should not alter which
// associated counter to manipulate.
srcExpr = srcExpr->IgnoreParenCasts();
// For parameters of forward-declared functions, we must make sure the
// associated counter variable is created, because for forward-declared
// functions the translation of the real definition may not have started yet.
if (const auto *param = dyn_cast<ParmVarDecl>(dstDecl))
declIdMapper.createFnParamCounterVar(param);
// For implicit objects of methods. Similar to the above.
else if (const auto *thisObject = dyn_cast<ImplicitParamDecl>(dstDecl))
declIdMapper.createFnParamCounterVar(thisObject);
// Handle AssocCounter#1 (see CounterVarFields comment)
if (const auto *dstPair =
declIdMapper.createOrGetCounterIdAliasPair(dstDecl)) {
auto *srcCounter = getFinalACSBufferCounterInstruction(srcExpr);
if (!srcCounter) {
emitFatalError("cannot find the associated counter variable",
srcExpr->getExprLoc());
return false;
}
dstPair->assign(srcCounter, spvBuilder);
return true;
}
// Handle AssocCounter#3
llvm::SmallVector<uint32_t, 4> srcIndices;
const auto *dstFields = declIdMapper.getCounterVarFields(dstDecl);
const auto *srcFields = getIntermediateACSBufferCounter(srcExpr, &srcIndices);
if (dstFields && srcFields) {
// The destination is a struct whose fields are themselves alias
// resources. But that's not necessarily true for the source, which can be
// deeply nested structs. That means source fields will have different
// index "prefixes", while the "prefix" for the destination is effectively
// an empty list (since it is not nested in other structs). We need to
// strip the index prefix from the source.
return dstFields->assign(*srcFields, /*dstIndices=*/{}, srcIndices,
spvBuilder, spvContext);
}
// AssocCounter#2 and AssocCounter#4 for the lhs cannot happen since the lhs
// is a stand-alone decl in this method.
return false;
}
bool SpirvEmitter::tryToAssignCounterVar(const Expr *dstExpr,
const Expr *srcExpr) {
dstExpr = dstExpr->IgnoreParenCasts();
srcExpr = srcExpr->IgnoreParenCasts();
auto *dstCounter = getFinalACSBufferCounterAliasAddressInstruction(dstExpr);
auto *srcCounter = getFinalACSBufferCounterInstruction(srcExpr);
if ((dstCounter == nullptr) != (srcCounter == nullptr)) {
emitFatalError("cannot handle associated counter variable assignment",
srcExpr->getExprLoc());
return false;
}
// Handle AssocCounter#1 & AssocCounter#2
if (dstCounter && srcCounter) {
spvBuilder.createStore(dstCounter, srcCounter, /* SourceLocation */ {});
return true;
}
// Handle AssocCounter#3 & AssocCounter#4
llvm::SmallVector<uint32_t, 4> dstIndices;
llvm::SmallVector<uint32_t, 4> srcIndices;
const auto *srcFields = getIntermediateACSBufferCounter(srcExpr, &srcIndices);
const auto *dstFields = getIntermediateACSBufferCounter(dstExpr, &dstIndices);
if (dstFields && srcFields) {
return dstFields->assign(*srcFields, dstIndices, srcIndices, spvBuilder,
spvContext);
}
return false;
}
SpirvInstruction *SpirvEmitter::getFinalACSBufferCounterAliasAddressInstruction(
const Expr *expr) {
const CounterIdAliasPair *counter = getFinalACSBufferCounter(expr);
return (counter ? counter->getAliasAddress() : nullptr);
}
SpirvInstruction *
SpirvEmitter::getFinalACSBufferCounterInstruction(const Expr *expr) {
const CounterIdAliasPair *counterPair = getFinalACSBufferCounter(expr);
if (!counterPair)
return nullptr;
SpirvInstruction *counter =
counterPair->getCounterVariable(spvBuilder, spvContext);
const auto srcLoc = expr->getExprLoc();
// TODO(5440): This code does not handle multi-dimensional arrays. We need
// to look at a specific example to determine the best way to do it. Could
// a call to collectArrayStructIndices handle that for us?
llvm::SmallVector<SpirvInstruction *, 2> indexes;
if (const auto *arraySubscriptExpr = dyn_cast<ArraySubscriptExpr>(expr)) {
indexes.push_back(doExpr(arraySubscriptExpr->getIdx()));
} else if (isResourceDescriptorHeap(expr->getType())) {
const Expr *index = nullptr;
getDescriptorHeapOperands(expr, /* base= */ nullptr, &index);
assert(index != nullptr && "operator[] had no indices.");
indexes.push_back(doExpr(index));
}
if (!indexes.empty()) {
counter = spvBuilder.createAccessChain(spvContext.getACSBufferCounterType(),
counter, indexes, srcLoc);
}
return counter;
}
const CounterIdAliasPair *
SpirvEmitter::getFinalACSBufferCounter(const Expr *expr) {
// AssocCounter#1: referencing some stand-alone variable
if (const auto *decl = getReferencedDef(expr))
return declIdMapper.createOrGetCounterIdAliasPair(decl);
if (isResourceDescriptorHeap(expr->getType())) {
const Expr *base = nullptr;
getDescriptorHeapOperands(expr, &base, /* index= */ nullptr);
return declIdMapper.createOrGetCounterIdAliasPair(getReferencedDef(base));
}
// AssocCounter#2: referencing some non-struct field
llvm::SmallVector<uint32_t, 4> rawIndices;
const auto *base = collectArrayStructIndices(
expr, /*rawIndex=*/true, &rawIndices, /*indices*/ nullptr);
const auto *decl =
(base && isa<CXXThisExpr>(base))
? getOrCreateDeclForMethodObject(cast<CXXMethodDecl>(curFunction))
: getReferencedDef(base);
return declIdMapper.getCounterIdAliasPair(decl, &rawIndices);
}
const CounterVarFields *SpirvEmitter::getIntermediateACSBufferCounter(
const Expr *expr, llvm::SmallVector<uint32_t, 4> *rawIndices) {
const auto *base = collectArrayStructIndices(expr, /*rawIndex=*/true,
rawIndices, /*indices*/ nullptr);
const auto *decl =
(base && isa<CXXThisExpr>(base))
// Use the decl we created to represent the implicit object
? getOrCreateDeclForMethodObject(cast<CXXMethodDecl>(curFunction))
// Find the referenced decl from the original source code
: getReferencedDef(base);
return declIdMapper.getCounterVarFields(decl);
}
const ImplicitParamDecl *
SpirvEmitter::getOrCreateDeclForMethodObject(const CXXMethodDecl *method) {
const auto found = thisDecls.find(method);
if (found != thisDecls.end())
return found->second;
const std::string name = getFunctionOrOperatorName(method, true) + ".this";
// Create a new identifier to convey the name
auto &identifier = astContext.Idents.get(name);
return thisDecls[method] = ImplicitParamDecl::Create(
astContext, /*DC=*/nullptr, SourceLocation(), &identifier,
method->getThisType(astContext)->getPointeeType());
}
SpirvInstruction *
SpirvEmitter::processACSBufferAppendConsume(const CXXMemberCallExpr *expr) {
const bool isAppend = expr->getNumArgs() == 1;
auto *zero =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 0));
const auto *object =
expr->getImplicitObjectArgument()->IgnoreParenNoopCasts(astContext);
auto *bufferInfo = loadIfAliasVarRef(object);
auto *index = incDecRWACSBufferCounter(
expr, isAppend,
// We have already translated the object in the above. Avoid duplication.
/*loadObject=*/false);
auto bufferElemTy = hlsl::GetHLSLResourceResultType(object->getType());
// If this is a variable used to communicate with the host, e.g., an
// Append/Consume structured buffer, and its element type is bool or a
// vector of bool, its effective type for SPIR-V must be uint, not bool.
// We must convert it to uint here.
bool needCast = false;
if (bufferInfo->getLayoutRule() != SpirvLayoutRule::Void &&
isBoolOrVecOfBoolType(bufferElemTy)) {
uint32_t vecSize = 1;
const bool isVec = isVectorType(bufferElemTy, nullptr, &vecSize);
bufferElemTy =
isVec ? astContext.getExtVectorType(astContext.UnsignedIntTy, vecSize)
: astContext.UnsignedIntTy;
needCast = true;
}
const auto range = expr->getSourceRange();
bufferInfo =
derefOrCreatePointerToValue(object->getType(), bufferInfo, bufferElemTy,
{zero, index}, object->getExprLoc(), range);
if (isAppend) {
// Write out the value
auto *arg0 = doExpr(expr->getArg(0), range);
if (!arg0)
return nullptr;
if (!arg0->isRValue()) {
arg0 = spvBuilder.createLoad(bufferElemTy, arg0,
expr->getArg(0)->getExprLoc(), range);
}
if (needCast &&
!isSameType(astContext, bufferElemTy, arg0->getAstResultType())) {
arg0 = castToType(arg0, arg0->getAstResultType(), bufferElemTy,
expr->getArg(0)->getExprLoc(), range);
}
storeValue(bufferInfo, arg0, bufferElemTy, expr->getCallee()->getExprLoc(),
range);
return nullptr;
} else {
// Note that we are returning a pointer (lvalue) here in order to allow
// further access to the fields of this element, e.g.,
// buffer.Consume().a.b. So we cannot forcefully mark all normal function
// calls as returning rvalues.
return bufferInfo;
}
}
SpirvInstruction *
SpirvEmitter::processStreamOutputAppend(const CXXMemberCallExpr *expr) {
// TODO: handle multiple stream-output objects
const auto range = expr->getSourceRange();
const auto *object =
expr->getImplicitObjectArgument()->IgnoreParenNoopCasts(astContext);
const auto *stream = cast<DeclRefExpr>(object)->getDecl();
auto *value = doExpr(expr->getArg(0), range);
declIdMapper.writeBackOutputStream(stream, stream->getType(), value, range);
spvBuilder.createEmitVertex(expr->getExprLoc(), range);
return nullptr;
}
SpirvInstruction *
SpirvEmitter::processStreamOutputRestart(const CXXMemberCallExpr *expr) {
// TODO: handle multiple stream-output objects
spvBuilder.createEndPrimitive(expr->getExprLoc(), expr->getSourceRange());
return nullptr;
}
SpirvInstruction *
SpirvEmitter::emitGetSamplePosition(SpirvInstruction *sampleCount,
SpirvInstruction *sampleIndex,
SourceLocation loc, SourceRange range) {
struct Float2 {
float x;
float y;
};
static const Float2 pos2[] = {
{4.0 / 16.0, 4.0 / 16.0},
{-4.0 / 16.0, -4.0 / 16.0},
};
static const Float2 pos4[] = {
{-2.0 / 16.0, -6.0 / 16.0},
{6.0 / 16.0, -2.0 / 16.0},
{-6.0 / 16.0, 2.0 / 16.0},
{2.0 / 16.0, 6.0 / 16.0},
};
static const Float2 pos8[] = {
{1.0 / 16.0, -3.0 / 16.0}, {-1.0 / 16.0, 3.0 / 16.0},
{5.0 / 16.0, 1.0 / 16.0}, {-3.0 / 16.0, -5.0 / 16.0},
{-5.0 / 16.0, 5.0 / 16.0}, {-7.0 / 16.0, -1.0 / 16.0},
{3.0 / 16.0, 7.0 / 16.0}, {7.0 / 16.0, -7.0 / 16.0},
};
static const Float2 pos16[] = {
{1.0 / 16.0, 1.0 / 16.0}, {-1.0 / 16.0, -3.0 / 16.0},
{-3.0 / 16.0, 2.0 / 16.0}, {4.0 / 16.0, -1.0 / 16.0},
{-5.0 / 16.0, -2.0 / 16.0}, {2.0 / 16.0, 5.0 / 16.0},
{5.0 / 16.0, 3.0 / 16.0}, {3.0 / 16.0, -5.0 / 16.0},
{-2.0 / 16.0, 6.0 / 16.0}, {0.0 / 16.0, -7.0 / 16.0},
{-4.0 / 16.0, -6.0 / 16.0}, {-6.0 / 16.0, 4.0 / 16.0},
{-8.0 / 16.0, 0.0 / 16.0}, {7.0 / 16.0, -4.0 / 16.0},
{6.0 / 16.0, 7.0 / 16.0}, {-7.0 / 16.0, -8.0 / 16.0},
};
// We are emitting the SPIR-V for the following HLSL source code:
//
// float2 position;
//
// if (count == 2) {
// position = pos2[index];
// }
// else if (count == 4) {
// position = pos4[index];
// }
// else if (count == 8) {
// position = pos8[index];
// }
// else if (count == 16) {
// position = pos16[index];
// }
// else {
// position = float2(0.0f, 0.0f);
// }
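// The tables above encode the standard multisample patterns (in units of
// 1/16 pixel) referenced by the emulation warning in
// processGetSamplePosition.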
const auto v2f32Type = astContext.getExtVectorType(astContext.FloatTy, 2);
// Creates a SPIR-V function scope variable of type float2[len].
const auto createArray = [this, v2f32Type, loc, range](const Float2 *ptr,
uint32_t len) {
llvm::SmallVector<SpirvConstant *, 16> components;
for (uint32_t i = 0; i < len; ++i) {
auto *x = spvBuilder.getConstantFloat(astContext.FloatTy,
llvm::APFloat(ptr[i].x));
auto *y = spvBuilder.getConstantFloat(astContext.FloatTy,
llvm::APFloat(ptr[i].y));
components.push_back(spvBuilder.getConstantComposite(v2f32Type, {x, y}));
}
const auto arrType = astContext.getConstantArrayType(
v2f32Type, llvm::APInt(32, len), clang::ArrayType::Normal, 0);
auto *val = spvBuilder.getConstantComposite(arrType, components);
const std::string varName =
"var.GetSamplePosition.data." + std::to_string(len);
auto *var = spvBuilder.addFnVar(arrType, loc, varName);
spvBuilder.createStore(var, val, loc, range);
return var;
};
auto *pos2Arr = createArray(pos2, 2);
auto *pos4Arr = createArray(pos4, 4);
auto *pos8Arr = createArray(pos8, 8);
auto *pos16Arr = createArray(pos16, 16);
auto *resultVar =
spvBuilder.addFnVar(v2f32Type, loc, "var.GetSamplePosition.result");
auto *then2BB = spvBuilder.createBasicBlock("if.GetSamplePosition.then2");
auto *then4BB = spvBuilder.createBasicBlock("if.GetSamplePosition.then4");
auto *then8BB = spvBuilder.createBasicBlock("if.GetSamplePosition.then8");
auto *then16BB = spvBuilder.createBasicBlock("if.GetSamplePosition.then16");
auto *else2BB = spvBuilder.createBasicBlock("if.GetSamplePosition.else2");
auto *else4BB = spvBuilder.createBasicBlock("if.GetSamplePosition.else4");
auto *else8BB = spvBuilder.createBasicBlock("if.GetSamplePosition.else8");
auto *else16BB = spvBuilder.createBasicBlock("if.GetSamplePosition.else16");
auto *merge2BB = spvBuilder.createBasicBlock("if.GetSamplePosition.merge2");
auto *merge4BB = spvBuilder.createBasicBlock("if.GetSamplePosition.merge4");
auto *merge8BB = spvBuilder.createBasicBlock("if.GetSamplePosition.merge8");
auto *merge16BB = spvBuilder.createBasicBlock("if.GetSamplePosition.merge16");
// if (count == 2) {
const auto check2 = spvBuilder.createBinaryOp(
spv::Op::OpIEqual, astContext.BoolTy, sampleCount,
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 2)),
loc, range);
spvBuilder.createConditionalBranch(check2, then2BB, else2BB, loc, merge2BB,
nullptr,
spv::SelectionControlMask::MaskNone,
spv::LoopControlMask::MaskNone, range);
spvBuilder.addSuccessor(then2BB);
spvBuilder.addSuccessor(else2BB);
spvBuilder.setMergeTarget(merge2BB);
// position = pos2[index];
// }
spvBuilder.setInsertPoint(then2BB);
auto *ac = spvBuilder.createAccessChain(v2f32Type, pos2Arr, {sampleIndex},
loc, range);
spvBuilder.createStore(
resultVar, spvBuilder.createLoad(v2f32Type, ac, loc, range), loc, range);
spvBuilder.createBranch(merge2BB, loc, nullptr, nullptr,
spv::LoopControlMask::MaskNone, range);
spvBuilder.addSuccessor(merge2BB);
// else if (count == 4) {
spvBuilder.setInsertPoint(else2BB);
const auto check4 = spvBuilder.createBinaryOp(
spv::Op::OpIEqual, astContext.BoolTy, sampleCount,
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 4)),
loc, range);
spvBuilder.createConditionalBranch(check4, then4BB, else4BB, loc, merge4BB,
nullptr,
spv::SelectionControlMask::MaskNone,
spv::LoopControlMask::MaskNone, range);
spvBuilder.addSuccessor(then4BB);
spvBuilder.addSuccessor(else4BB);
spvBuilder.setMergeTarget(merge4BB);
// position = pos4[index];
// }
spvBuilder.setInsertPoint(then4BB);
ac = spvBuilder.createAccessChain(v2f32Type, pos4Arr, {sampleIndex}, loc,
range);
spvBuilder.createStore(
resultVar, spvBuilder.createLoad(v2f32Type, ac, loc, range), loc, range);
spvBuilder.createBranch(merge4BB, loc, nullptr, nullptr,
spv::LoopControlMask::MaskNone, range);
spvBuilder.addSuccessor(merge4BB);
// else if (count == 8) {
spvBuilder.setInsertPoint(else4BB);
const auto check8 = spvBuilder.createBinaryOp(
spv::Op::OpIEqual, astContext.BoolTy, sampleCount,
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 8)),
loc, range);
spvBuilder.createConditionalBranch(check8, then8BB, else8BB, loc, merge8BB,
nullptr,
spv::SelectionControlMask::MaskNone,
spv::LoopControlMask::MaskNone, range);
spvBuilder.addSuccessor(then8BB);
spvBuilder.addSuccessor(else8BB);
spvBuilder.setMergeTarget(merge8BB);
// position = pos8[index];
// }
spvBuilder.setInsertPoint(then8BB);
ac = spvBuilder.createAccessChain(v2f32Type, pos8Arr, {sampleIndex}, loc,
range);
spvBuilder.createStore(
resultVar, spvBuilder.createLoad(v2f32Type, ac, loc, range), loc, range);
spvBuilder.createBranch(merge8BB, loc, nullptr, nullptr,
spv::LoopControlMask::MaskNone, range);
spvBuilder.addSuccessor(merge8BB);
// else if (count == 16) {
spvBuilder.setInsertPoint(else8BB);
const auto check16 = spvBuilder.createBinaryOp(
spv::Op::OpIEqual, astContext.BoolTy, sampleCount,
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 16)),
loc, range);
spvBuilder.createConditionalBranch(check16, then16BB, else16BB, loc,
merge16BB, nullptr,
spv::SelectionControlMask::MaskNone,
spv::LoopControlMask::MaskNone, range);
spvBuilder.addSuccessor(then16BB);
spvBuilder.addSuccessor(else16BB);
spvBuilder.setMergeTarget(merge16BB);
// position = pos16[index];
// }
spvBuilder.setInsertPoint(then16BB);
ac = spvBuilder.createAccessChain(v2f32Type, pos16Arr, {sampleIndex}, loc,
range);
spvBuilder.createStore(
resultVar, spvBuilder.createLoad(v2f32Type, ac, loc, range), loc, range);
spvBuilder.createBranch(merge16BB, loc, nullptr, nullptr,
spv::LoopControlMask::MaskNone, range);
spvBuilder.addSuccessor(merge16BB);
// else {
// position = float2(0.0f, 0.0f);
// }
spvBuilder.setInsertPoint(else16BB);
auto *zero =
spvBuilder.getConstantFloat(astContext.FloatTy, llvm::APFloat(0.0f));
auto *v2f32Zero = spvBuilder.getConstantComposite(v2f32Type, {zero, zero});
spvBuilder.createStore(resultVar, v2f32Zero, loc, range);
spvBuilder.createBranch(merge16BB, loc, nullptr, nullptr,
spv::LoopControlMask::MaskNone, range);
spvBuilder.addSuccessor(merge16BB);
spvBuilder.setInsertPoint(merge16BB);
spvBuilder.createBranch(merge8BB, loc, nullptr, nullptr,
spv::LoopControlMask::MaskNone, range);
spvBuilder.addSuccessor(merge8BB);
spvBuilder.setInsertPoint(merge8BB);
spvBuilder.createBranch(merge4BB, loc, nullptr, nullptr,
spv::LoopControlMask::MaskNone, range);
spvBuilder.addSuccessor(merge4BB);
spvBuilder.setInsertPoint(merge4BB);
spvBuilder.createBranch(merge2BB, loc, nullptr, nullptr,
spv::LoopControlMask::MaskNone, range);
spvBuilder.addSuccessor(merge2BB);
spvBuilder.setInsertPoint(merge2BB);
return spvBuilder.createLoad(v2f32Type, resultVar, loc, range);
}
SpirvInstruction *
SpirvEmitter::doCXXMemberCallExpr(const CXXMemberCallExpr *expr) {
const FunctionDecl *callee = expr->getDirectCallee();
llvm::StringRef group;
uint32_t opcode = static_cast<uint32_t>(hlsl::IntrinsicOp::Num_Intrinsics);
if (hlsl::GetIntrinsicOp(callee, opcode, group)) {
return processIntrinsicMemberCall(expr,
static_cast<hlsl::IntrinsicOp>(opcode));
}
return processCall(expr);
}
void SpirvEmitter::handleOffsetInMethodCall(const CXXMemberCallExpr *expr,
uint32_t index,
SpirvInstruction **constOffset,
SpirvInstruction **varOffset) {
assert(constOffset && varOffset);
// Ensure the given arg index is not out-of-range.
assert(index < expr->getNumArgs());
*constOffset = *varOffset = nullptr; // Initialize both first
if ((*constOffset = constEvaluator.tryToEvaluateAsConst(expr->getArg(index),
isSpecConstantMode)))
return; // Constant offset
else
*varOffset = doExpr(expr->getArg(index));
}
SpirvInstruction *
SpirvEmitter::processIntrinsicMemberCall(const CXXMemberCallExpr *expr,
hlsl::IntrinsicOp opcode) {
using namespace hlsl;
SpirvInstruction *retVal = nullptr;
switch (opcode) {
case IntrinsicOp::MOP_Sample:
retVal = processTextureSampleGather(expr, /*isSample=*/true);
break;
case IntrinsicOp::MOP_Gather:
retVal = processTextureSampleGather(expr, /*isSample=*/false);
break;
case IntrinsicOp::MOP_SampleBias:
retVal = processTextureSampleBiasLevel(expr, /*isBias=*/true);
break;
case IntrinsicOp::MOP_SampleLevel:
retVal = processTextureSampleBiasLevel(expr, /*isBias=*/false);
break;
case IntrinsicOp::MOP_SampleGrad:
retVal = processTextureSampleGrad(expr);
break;
case IntrinsicOp::MOP_SampleCmp:
retVal = processTextureSampleCmp(expr);
break;
case IntrinsicOp::MOP_SampleCmpLevelZero:
retVal = processTextureSampleCmpLevelZero(expr);
break;
case IntrinsicOp::MOP_SampleCmpLevel:
retVal = processTextureSampleCmpLevel(expr);
break;
case IntrinsicOp::MOP_GatherRed:
retVal = processTextureGatherRGBACmpRGBA(expr, /*isCmp=*/false, 0);
break;
case IntrinsicOp::MOP_GatherGreen:
retVal = processTextureGatherRGBACmpRGBA(expr, /*isCmp=*/false, 1);
break;
case IntrinsicOp::MOP_GatherBlue:
retVal = processTextureGatherRGBACmpRGBA(expr, /*isCmp=*/false, 2);
break;
case IntrinsicOp::MOP_GatherAlpha:
retVal = processTextureGatherRGBACmpRGBA(expr, /*isCmp=*/false, 3);
break;
case IntrinsicOp::MOP_GatherCmp:
retVal = processTextureGatherCmp(expr);
break;
case IntrinsicOp::MOP_GatherCmpRed:
retVal = processTextureGatherRGBACmpRGBA(expr, /*isCmp=*/true, 0);
break;
case IntrinsicOp::MOP_Load:
return processBufferTextureLoad(expr);
case IntrinsicOp::MOP_Load2:
return processByteAddressBufferLoadStore(expr, 2, /*doStore*/ false);
case IntrinsicOp::MOP_Load3:
return processByteAddressBufferLoadStore(expr, 3, /*doStore*/ false);
case IntrinsicOp::MOP_Load4:
return processByteAddressBufferLoadStore(expr, 4, /*doStore*/ false);
case IntrinsicOp::MOP_Store:
return processByteAddressBufferLoadStore(expr, 1, /*doStore*/ true);
case IntrinsicOp::MOP_Store2:
return processByteAddressBufferLoadStore(expr, 2, /*doStore*/ true);
case IntrinsicOp::MOP_Store3:
return processByteAddressBufferLoadStore(expr, 3, /*doStore*/ true);
case IntrinsicOp::MOP_Store4:
return processByteAddressBufferLoadStore(expr, 4, /*doStore*/ true);
case IntrinsicOp::MOP_GetDimensions:
retVal = processGetDimensions(expr);
break;
case IntrinsicOp::MOP_CalculateLevelOfDetail:
retVal = processTextureLevelOfDetail(expr, /* unclamped */ false);
break;
case IntrinsicOp::MOP_CalculateLevelOfDetailUnclamped:
retVal = processTextureLevelOfDetail(expr, /* unclamped */ true);
break;
case IntrinsicOp::MOP_IncrementCounter:
retVal = spvBuilder.createUnaryOp(
spv::Op::OpBitcast, astContext.UnsignedIntTy,
incDecRWACSBufferCounter(expr, /*isInc*/ true),
expr->getCallee()->getExprLoc(), expr->getCallee()->getSourceRange());
break;
case IntrinsicOp::MOP_DecrementCounter:
retVal = spvBuilder.createUnaryOp(
spv::Op::OpBitcast, astContext.UnsignedIntTy,
incDecRWACSBufferCounter(expr, /*isInc*/ false),
expr->getCallee()->getExprLoc(), expr->getCallee()->getSourceRange());
break;
case IntrinsicOp::MOP_Append:
if (hlsl::IsHLSLStreamOutputType(
expr->getImplicitObjectArgument()->getType()))
return processStreamOutputAppend(expr);
else
return processACSBufferAppendConsume(expr);
case IntrinsicOp::MOP_Consume:
return processACSBufferAppendConsume(expr);
case IntrinsicOp::MOP_RestartStrip:
retVal = processStreamOutputRestart(expr);
break;
case IntrinsicOp::MOP_InterlockedAdd:
case IntrinsicOp::MOP_InterlockedAnd:
case IntrinsicOp::MOP_InterlockedOr:
case IntrinsicOp::MOP_InterlockedXor:
case IntrinsicOp::MOP_InterlockedUMax:
case IntrinsicOp::MOP_InterlockedUMin:
case IntrinsicOp::MOP_InterlockedMax:
case IntrinsicOp::MOP_InterlockedMin:
case IntrinsicOp::MOP_InterlockedExchange:
case IntrinsicOp::MOP_InterlockedCompareExchange:
case IntrinsicOp::MOP_InterlockedCompareStore:
retVal = processRWByteAddressBufferAtomicMethods(opcode, expr);
break;
case IntrinsicOp::MOP_GetSamplePosition:
retVal = processGetSamplePosition(expr);
break;
case IntrinsicOp::MOP_SubpassLoad:
retVal = processSubpassLoad(expr);
break;
case IntrinsicOp::MOP_GatherCmpGreen:
case IntrinsicOp::MOP_GatherCmpBlue:
case IntrinsicOp::MOP_GatherCmpAlpha:
emitError("no equivalent for %0 intrinsic method in Vulkan",
expr->getCallee()->getExprLoc())
<< getFunctionOrOperatorName(expr->getMethodDecl(), true);
return nullptr;
case IntrinsicOp::MOP_TraceRayInline:
return processTraceRayInline(expr);
case IntrinsicOp::MOP_Abort:
case IntrinsicOp::MOP_CandidateGeometryIndex:
case IntrinsicOp::MOP_CandidateInstanceContributionToHitGroupIndex:
case IntrinsicOp::MOP_CandidateInstanceID:
case IntrinsicOp::MOP_CandidateInstanceIndex:
case IntrinsicOp::MOP_CandidateObjectRayDirection:
case IntrinsicOp::MOP_CandidateObjectRayOrigin:
case IntrinsicOp::MOP_CandidateObjectToWorld3x4:
case IntrinsicOp::MOP_CandidateObjectToWorld4x3:
case IntrinsicOp::MOP_CandidatePrimitiveIndex:
case IntrinsicOp::MOP_CandidateProceduralPrimitiveNonOpaque:
case IntrinsicOp::MOP_CandidateTriangleBarycentrics:
case IntrinsicOp::MOP_CandidateTriangleFrontFace:
case IntrinsicOp::MOP_CandidateTriangleRayT:
case IntrinsicOp::MOP_CandidateType:
case IntrinsicOp::MOP_CandidateWorldToObject3x4:
case IntrinsicOp::MOP_CandidateWorldToObject4x3:
case IntrinsicOp::MOP_CommitNonOpaqueTriangleHit:
case IntrinsicOp::MOP_CommitProceduralPrimitiveHit:
case IntrinsicOp::MOP_CommittedGeometryIndex:
case IntrinsicOp::MOP_CommittedInstanceContributionToHitGroupIndex:
case IntrinsicOp::MOP_CommittedInstanceID:
case IntrinsicOp::MOP_CommittedInstanceIndex:
case IntrinsicOp::MOP_CommittedObjectRayDirection:
case IntrinsicOp::MOP_CommittedObjectRayOrigin:
case IntrinsicOp::MOP_CommittedObjectToWorld3x4:
case IntrinsicOp::MOP_CommittedObjectToWorld4x3:
case IntrinsicOp::MOP_CommittedPrimitiveIndex:
case IntrinsicOp::MOP_CommittedRayT:
case IntrinsicOp::MOP_CommittedStatus:
case IntrinsicOp::MOP_CommittedTriangleBarycentrics:
case IntrinsicOp::MOP_CommittedTriangleFrontFace:
case IntrinsicOp::MOP_CommittedWorldToObject3x4:
case IntrinsicOp::MOP_CommittedWorldToObject4x3:
case IntrinsicOp::MOP_Proceed:
case IntrinsicOp::MOP_RayFlags:
case IntrinsicOp::MOP_RayTMin:
case IntrinsicOp::MOP_WorldRayDirection:
case IntrinsicOp::MOP_WorldRayOrigin:
return processRayQueryIntrinsics(expr, opcode);
default:
emitError("intrinsic '%0' method unimplemented",
expr->getCallee()->getExprLoc())
<< getFunctionOrOperatorName(expr->getDirectCallee(), true);
return nullptr;
}
if (retVal)
retVal->setRValue();
return retVal;
}
SpirvInstruction *SpirvEmitter::createImageSample(
QualType retType, QualType imageType, SpirvInstruction *image,
SpirvInstruction *sampler, SpirvInstruction *coordinate,
SpirvInstruction *compareVal, SpirvInstruction *bias, SpirvInstruction *lod,
std::pair<SpirvInstruction *, SpirvInstruction *> grad,
SpirvInstruction *constOffset, SpirvInstruction *varOffset,
SpirvInstruction *constOffsets, SpirvInstruction *sample,
SpirvInstruction *minLod, SpirvInstruction *residencyCodeId,
SourceLocation loc, SourceRange range) {
if (varOffset)
needsLegalization = true;
// SampleDref* instructions in SPIR-V always return a scalar, which already
// matches the HLSL return type, so no result-type adjustment is needed.
if (compareVal) {
return spvBuilder.createImageSample(
retType, imageType, image, sampler, coordinate, compareVal, bias, lod,
grad, constOffset, varOffset, constOffsets, sample, minLod,
residencyCodeId, loc, range);
}
// Non-Dref Sample instructions in SPIR-V must always return a vec4.
auto texelType = retType;
QualType elemType = {};
uint32_t retVecSize = 0;
if (isVectorType(retType, &elemType, &retVecSize) && retVecSize != 4) {
texelType = astContext.getExtVectorType(elemType, 4);
} else if (isScalarType(retType)) {
retVecSize = 1;
elemType = retType;
texelType = astContext.getExtVectorType(retType, 4);
}
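// For example, a float2 result is fetched as a float4 here, and the first
// two components are extracted at the end of this function.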
// The Lod and Grad image operands require explicit-lod instructions.
// Otherwise we use implicit-lod instructions.
const bool isExplicit = lod || (grad.first && grad.second);
// Implicit-lod instructions are only allowed in pixel and compute shaders.
if (!spvContext.isPS() && !spvContext.isCS() && !isExplicit)
emitError("sampling with implicit lod is only allowed in fragment and "
"compute shaders",
loc);
auto *retVal = spvBuilder.createImageSample(
texelType, imageType, image, sampler, coordinate, compareVal, bias, lod,
grad, constOffset, varOffset, constOffsets, sample, minLod,
residencyCodeId, loc, range);
// Extract smaller vector from the vec4 result if necessary.
if (texelType != retType) {
retVal = extractVecFromVec4(retVal, retVecSize, elemType, loc);
}
return retVal;
}
SpirvInstruction *
SpirvEmitter::processTextureSampleGather(const CXXMemberCallExpr *expr,
const bool isSample) {
// Signatures:
// For Texture1D, Texture1DArray, Texture2D, Texture2DArray, Texture3D:
// DXGI_FORMAT Object.Sample(sampler_state S,
// float Location
// [, int Offset]
// [, float Clamp]
// [, out uint Status]);
//
// For TextureCube and TextureCubeArray:
// DXGI_FORMAT Object.Sample(sampler_state S,
// float Location
// [, float Clamp]
// [, out uint Status]);
//
// For Texture2D/Texture2DArray:
// <Template Type>4 Object.Gather(sampler_state S,
// float2|3|4 Location,
// int2 Offset
// [, uint Status]);
//
// For TextureCube/TextureCubeArray:
// <Template Type>4 Object.Gather(sampler_state S,
// float2|3|4 Location
// [, uint Status]);
//
// Other Texture types do not have a Gather method.
const auto numArgs = expr->getNumArgs();
const auto loc = expr->getExprLoc();
const auto range = expr->getSourceRange();
const bool hasStatusArg =
expr->getArg(numArgs - 1)->getType()->isUnsignedIntegerType();
SpirvInstruction *clamp = nullptr;
if (numArgs > 2 && expr->getArg(2)->getType()->isFloatingType())
clamp = doExpr(expr->getArg(2));
else if (numArgs > 3 && expr->getArg(3)->getType()->isFloatingType())
clamp = doExpr(expr->getArg(3));
const bool hasClampArg = (clamp != nullptr);
const auto status =
hasStatusArg ? doExpr(expr->getArg(numArgs - 1)) : nullptr;
// Subtract 1 for status (if it exists), subtract 1 for clamp (if it exists),
// and subtract 2 for sampler_state and location.
const bool hasOffsetArg = numArgs - hasStatusArg - hasClampArg - 2 > 0;
const auto *imageExpr = expr->getImplicitObjectArgument();
const QualType imageType = imageExpr->getType();
auto *image = loadIfGLValue(imageExpr);
auto *sampler = doExpr(expr->getArg(0));
auto *coordinate = doExpr(expr->getArg(1));
// .Sample()/.Gather() may have an optional third parameter for offset.
SpirvInstruction *constOffset = nullptr, *varOffset = nullptr;
if (hasOffsetArg)
handleOffsetInMethodCall(expr, 2, &constOffset, &varOffset);
const auto retType = expr->getDirectCallee()->getReturnType();
if (isSample) {
if (spvContext.isCS()) {
addDerivativeGroupExecutionMode();
}
return createImageSample(retType, imageType, image, sampler, coordinate,
/*compareVal*/ nullptr, /*bias*/ nullptr,
/*lod*/ nullptr, std::make_pair(nullptr, nullptr),
constOffset, varOffset,
/*constOffsets*/ nullptr, /*sampleNumber*/ nullptr,
/*minLod*/ clamp, status,
expr->getCallee()->getLocStart(), range);
} else {
return spvBuilder.createImageGather(
retType, imageType, image, sampler, coordinate,
// .Gather() doc says we return four components of red data.
spvBuilder.getConstantInt(astContext.IntTy, llvm::APInt(32, 0)),
/*compareVal*/ nullptr, constOffset, varOffset,
/*constOffsets*/ nullptr, /*sampleNumber*/ nullptr, status, loc, range);
}
}
SpirvInstruction *
SpirvEmitter::processTextureSampleBiasLevel(const CXXMemberCallExpr *expr,
const bool isBias) {
// Signatures:
// For Texture1D, Texture1DArray, Texture2D, Texture2DArray, and Texture3D:
// DXGI_FORMAT Object.SampleBias(sampler_state S,
// float Location,
// float Bias
// [, int Offset]
// [, float clamp]
// [, out uint Status]);
//
// For TextureCube and TextureCubeArray:
// DXGI_FORMAT Object.SampleBias(sampler_state S,
// float Location,
// float Bias
// [, float clamp]
// [, out uint Status]);
//
// For Texture1D, Texture1DArray, Texture2D, Texture2DArray, and Texture3D:
// DXGI_FORMAT Object.SampleLevel(sampler_state S,
// float Location,
// float LOD
// [, int Offset]
// [, out uint Status]);
//
// For TextureCube and TextureCubeArray:
// DXGI_FORMAT Object.SampleLevel(sampler_state S,
// float Location,
// float LOD
// [, out uint Status]);
const auto numArgs = expr->getNumArgs();
const bool hasStatusArg =
expr->getArg(numArgs - 1)->getType()->isUnsignedIntegerType();
auto *status = hasStatusArg ? doExpr(expr->getArg(numArgs - 1)) : nullptr;
SpirvInstruction *clamp = nullptr;
// The .SampleLevel() methods do not take the clamp argument.
if (isBias) {
if (numArgs > 3 && expr->getArg(3)->getType()->isFloatingType())
clamp = doExpr(expr->getArg(3));
else if (numArgs > 4 && expr->getArg(4)->getType()->isFloatingType())
clamp = doExpr(expr->getArg(4));
}
const bool hasClampArg = clamp != nullptr;
// Subtract 1 for clamp (if it exists), 1 for status (if it exists),
// and 3 for sampler_state, location, and Bias/LOD.
const bool hasOffsetArg = numArgs - hasClampArg - hasStatusArg - 3 > 0;
const auto *imageExpr = expr->getImplicitObjectArgument();
const QualType imageType = imageExpr->getType();
auto *image = loadIfGLValue(imageExpr);
auto *sampler = doExpr(expr->getArg(0));
auto *coordinate = doExpr(expr->getArg(1));
SpirvInstruction *lod = nullptr;
SpirvInstruction *bias = nullptr;
if (isBias) {
bias = doExpr(expr->getArg(2));
} else {
lod = doExpr(expr->getArg(2));
}
// If offset is present in .SampleBias()/.SampleLevel(), it is the fourth
// argument.
SpirvInstruction *constOffset = nullptr, *varOffset = nullptr;
if (hasOffsetArg)
handleOffsetInMethodCall(expr, 3, &constOffset, &varOffset);
const auto retType = expr->getDirectCallee()->getReturnType();
if (!lod && spvContext.isCS()) {
addDerivativeGroupExecutionMode();
}
return createImageSample(
retType, imageType, image, sampler, coordinate,
/*compareVal*/ nullptr, bias, lod, std::make_pair(nullptr, nullptr),
constOffset, varOffset,
/*constOffsets*/ nullptr, /*sampleNumber*/ nullptr,
/*minLod*/ clamp, status, expr->getCallee()->getLocStart(),
expr->getSourceRange());
}
SpirvInstruction *
SpirvEmitter::processTextureSampleGrad(const CXXMemberCallExpr *expr) {
// Signature:
// For Texture1D, Texture1DArray, Texture2D, Texture2DArray, and Texture3D:
// DXGI_FORMAT Object.SampleGrad(sampler_state S,
// float Location,
// float DDX,
// float DDY
// [, int Offset]
// [, float Clamp]
// [, out uint Status]);
//
// For TextureCube and TextureCubeArray:
// DXGI_FORMAT Object.SampleGrad(sampler_state S,
// float Location,
// float DDX,
// float DDY
// [, float Clamp]
// [, out uint Status]);
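  //
  // For example (illustrative HLSL):
  //   float4 c = myTex.SampleGrad(mySampler, uv, ddx(uv), ddy(uv));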
const auto numArgs = expr->getNumArgs();
const bool hasStatusArg =
expr->getArg(numArgs - 1)->getType()->isUnsignedIntegerType();
auto *status = hasStatusArg ? doExpr(expr->getArg(numArgs - 1)) : nullptr;
SpirvInstruction *clamp = nullptr;
if (numArgs > 4 && expr->getArg(4)->getType()->isFloatingType())
clamp = doExpr(expr->getArg(4));
else if (numArgs > 5 && expr->getArg(5)->getType()->isFloatingType())
clamp = doExpr(expr->getArg(5));
const bool hasClampArg = clamp != nullptr;
// Subtract 1 for clamp (if it exists), 1 for status (if it exists),
  // and 4 for sampler_state, location, DDX, and DDY.
const bool hasOffsetArg = numArgs - hasClampArg - hasStatusArg - 4 > 0;
const auto *imageExpr = expr->getImplicitObjectArgument();
const QualType imageType = imageExpr->getType();
auto *image = loadIfGLValue(imageExpr);
auto *sampler = doExpr(expr->getArg(0));
auto *coordinate = doExpr(expr->getArg(1));
auto *ddx = doExpr(expr->getArg(2));
auto *ddy = doExpr(expr->getArg(3));
// If offset is present in .SampleGrad(), it is the fifth argument.
SpirvInstruction *constOffset = nullptr, *varOffset = nullptr;
if (hasOffsetArg)
handleOffsetInMethodCall(expr, 4, &constOffset, &varOffset);
const auto retType = expr->getDirectCallee()->getReturnType();
return createImageSample(
retType, imageType, image, sampler, coordinate,
/*compareVal*/ nullptr, /*bias*/ nullptr,
/*lod*/ nullptr, std::make_pair(ddx, ddy), constOffset, varOffset,
/*constOffsets*/ nullptr, /*sampleNumber*/ nullptr,
/*minLod*/ clamp, status, expr->getCallee()->getLocStart(),
expr->getSourceRange());
}
SpirvInstruction *
SpirvEmitter::processTextureSampleCmp(const CXXMemberCallExpr *expr) {
// .SampleCmp() Signature:
//
// For Texture1D, Texture1DArray, Texture2D, Texture2DArray:
// float Object.SampleCmp(
// SamplerComparisonState S,
// float Location,
// float CompareValue
// [, int Offset]
// [, float Clamp]
// [, out uint Status]
// );
//
// For TextureCube and TextureCubeArray:
// float Object.SampleCmp(
// SamplerComparisonState S,
// float Location,
// float CompareValue
// [, float Clamp]
// [, out uint Status]
// );
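  //
  // For example (illustrative HLSL; 'myCmpSampler' is a placeholder name):
  //   float s = myTex.SampleCmp(myCmpSampler, uv, /*CompareValue*/ 0.5);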
const auto numArgs = expr->getNumArgs();
const bool hasStatusArg =
expr->getArg(numArgs - 1)->getType()->isUnsignedIntegerType();
auto *status = hasStatusArg ? doExpr(expr->getArg(numArgs - 1)) : nullptr;
SpirvInstruction *clamp = nullptr;
if (numArgs > 3 && expr->getArg(3)->getType()->isFloatingType())
clamp = doExpr(expr->getArg(3));
else if (numArgs > 4 && expr->getArg(4)->getType()->isFloatingType())
clamp = doExpr(expr->getArg(4));
const bool hasClampArg = clamp != nullptr;
const auto *imageExpr = expr->getImplicitObjectArgument();
auto *image = loadIfGLValue(imageExpr);
auto *sampler = doExpr(expr->getArg(0));
auto *coordinate = doExpr(expr->getArg(1));
auto *compareVal = doExpr(expr->getArg(2));
// If offset is present in .SampleCmp(), it will be the fourth argument.
SpirvInstruction *constOffset = nullptr, *varOffset = nullptr;
// Subtract 1 for clamp (if it exists), 1 for status (if it exists),
// and 3 for sampler_state, location, and compare_value.
const bool hasOffsetArg = numArgs - hasStatusArg - hasClampArg - 3 > 0;
if (hasOffsetArg)
handleOffsetInMethodCall(expr, 3, &constOffset, &varOffset);
const auto retType = expr->getDirectCallee()->getReturnType();
const auto imageType = imageExpr->getType();
if (spvContext.isCS()) {
addDerivativeGroupExecutionMode();
}
return createImageSample(
retType, imageType, image, sampler, coordinate, compareVal,
/*bias*/ nullptr, /*lod*/ nullptr, std::make_pair(nullptr, nullptr),
constOffset, varOffset, /*constOffsets*/ nullptr,
/*sampleNumber*/ nullptr, /*minLod*/ clamp, status,
expr->getCallee()->getLocStart(), expr->getSourceRange());
}
SpirvInstruction *
SpirvEmitter::processTextureSampleCmpLevelZero(const CXXMemberCallExpr *expr) {
// .SampleCmpLevelZero() is identical to .SampleCmp() on mipmap level 0 only.
// It never takes a clamp argument, which is good because lod and clamp may
// not be used together.
  // .SampleCmpLevel() is identical to .SampleCmpLevelZero(), except that the
  // LOD is taken as an explicit float argument.
//
// .SampleCmpLevelZero() Signature:
//
// For Texture1D, Texture1DArray, Texture2D, Texture2DArray:
// float Object.SampleCmpLevelZero(
// SamplerComparisonState S,
// float Location,
// float CompareValue
// [, int Offset]
// [, out uint Status]
// );
//
// For TextureCube and TextureCubeArray:
// float Object.SampleCmpLevelZero(
// SamplerComparisonState S,
// float Location,
// float CompareValue
// [, out uint Status]
// );
const auto numArgs = expr->getNumArgs();
const bool hasStatusArg =
expr->getArg(numArgs - 1)->getType()->isUnsignedIntegerType();
auto *status = hasStatusArg ? doExpr(expr->getArg(numArgs - 1)) : nullptr;
const auto *imageExpr = expr->getImplicitObjectArgument();
auto *image = loadIfGLValue(imageExpr);
auto *sampler = doExpr(expr->getArg(0));
auto *coordinate = doExpr(expr->getArg(1));
auto *compareVal = doExpr(expr->getArg(2));
auto *lod =
spvBuilder.getConstantFloat(astContext.FloatTy, llvm::APFloat(0.0f));
  // If offset is present in .SampleCmpLevelZero(), it will be the fourth
  // argument.
SpirvInstruction *constOffset = nullptr, *varOffset = nullptr;
const bool hasOffsetArg = numArgs - hasStatusArg - 3 > 0;
if (hasOffsetArg)
handleOffsetInMethodCall(expr, 3, &constOffset, &varOffset);
const auto retType = expr->getDirectCallee()->getReturnType();
const auto imageType = imageExpr->getType();
return createImageSample(
retType, imageType, image, sampler, coordinate, compareVal,
/*bias*/ nullptr, /*lod*/ lod, std::make_pair(nullptr, nullptr),
constOffset, varOffset, /*constOffsets*/ nullptr,
/*sampleNumber*/ nullptr, /*clamp*/ nullptr, status,
expr->getCallee()->getLocStart(), expr->getSourceRange());
}
SpirvInstruction *
SpirvEmitter::processTextureSampleCmpLevel(const CXXMemberCallExpr *expr) {
  // .SampleCmpLevel() is identical to .SampleCmpLevelZero(), except that the
  // LOD is taken as an explicit float argument.
//
// For Texture1D, Texture1DArray, Texture2D, Texture2DArray:
// float Object.SampleCmpLevel(
// SamplerComparisonState S,
// float Location,
// float CompareValue,
  //   float LOD
// [, int Offset]
// [, out uint Status]
// );
//
// For TextureCube and TextureCubeArray:
// float Object.SampleCmpLevel(
// SamplerComparisonState S,
// float Location,
  //   float CompareValue,
  //   float LOD
// [, out uint Status]
// );
const auto numArgs = expr->getNumArgs();
const bool hasStatusArg =
expr->getArg(numArgs - 1)->getType()->isUnsignedIntegerType();
auto *status = hasStatusArg ? doExpr(expr->getArg(numArgs - 1)) : nullptr;
const auto *imageExpr = expr->getImplicitObjectArgument();
auto *image = loadIfGLValue(imageExpr);
auto *sampler = doExpr(expr->getArg(0));
auto *coordinate = doExpr(expr->getArg(1));
auto *compareVal = doExpr(expr->getArg(2));
auto *lod = doExpr(expr->getArg(3));
  // If offset is present in .SampleCmpLevel(), it will be the fifth argument
  // (index 4).
SpirvInstruction *constOffset = nullptr, *varOffset = nullptr;
const bool hasOffsetArg = numArgs - hasStatusArg - 4 > 0;
if (hasOffsetArg)
handleOffsetInMethodCall(expr, 4, &constOffset, &varOffset);
const auto retType = expr->getDirectCallee()->getReturnType();
const auto imageType = imageExpr->getType();
return createImageSample(
retType, imageType, image, sampler, coordinate, compareVal,
/*bias*/ nullptr, /*lod*/ lod, std::make_pair(nullptr, nullptr),
constOffset, varOffset, /*constOffsets*/ nullptr,
/*sampleNumber*/ nullptr, /*clamp*/ nullptr, status,
expr->getCallee()->getLocStart(), expr->getSourceRange());
}
SpirvInstruction *
SpirvEmitter::processBufferTextureLoad(const CXXMemberCallExpr *expr) {
// Signature:
// For Texture1D, Texture1DArray, Texture2D, Texture2DArray, Texture3D:
// ret Object.Load(int Location
// [, int Offset]
// [, uint status]);
//
// For Texture2DMS and Texture2DMSArray, there is one additional argument:
// ret Object.Load(int Location
// [, int SampleIndex]
// [, int Offset]
// [, uint status]);
//
// For (RW)Buffer, RWTexture1D, RWTexture1DArray, RWTexture2D,
// RWTexture2DArray, RWTexture3D:
// ret Object.Load (int Location
// [, uint status]);
//
// Note: (RW)ByteAddressBuffer and (RW)StructuredBuffer types also have Load
// methods that take an additional Status argument. However, since these types
// are not represented as OpTypeImage in SPIR-V, we don't have a way of
// figuring out the Residency Code for them. Therefore having the Status
// argument for these types is not supported.
//
// For (RW)ByteAddressBuffer:
// ret Object.{Load,Load2,Load3,Load4} (int Location
// [, uint status]);
//
// For (RW)StructuredBuffer:
// ret Object.Load (int Location
// [, uint status]);
//
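  // For example (illustrative HLSL; 'myTex2D' is a placeholder name):
  //   float4 v = myTex2D.Load(int3(x, y, /*mip*/ 0), /*Offset*/ int2(1, 0));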
const auto *object = expr->getImplicitObjectArgument();
const auto objectType = object->getType();
if (isRWByteAddressBuffer(objectType) || isByteAddressBuffer(objectType))
return processByteAddressBufferLoadStore(expr, 1, /*doStore*/ false);
if (isStructuredBuffer(objectType))
return processStructuredBufferLoad(expr);
const auto numArgs = expr->getNumArgs();
const auto *locationArg = expr->getArg(0);
const bool textureMS = isTextureMS(objectType);
const bool hasStatusArg =
expr->getArg(numArgs - 1)->getType()->isUnsignedIntegerType();
auto *status = hasStatusArg ? doExpr(expr->getArg(numArgs - 1)) : nullptr;
auto loc = expr->getExprLoc();
auto range = expr->getSourceRange();
if (isBuffer(objectType) || isRWBuffer(objectType) || isRWTexture(objectType))
return processBufferTextureLoad(object, doExpr(locationArg),
/*constOffset*/ nullptr, /*lod*/ nullptr,
/*residencyCode*/ status, loc, range);
  // Subtract 1 for status (if it exists), 1 for sampleIndex (if it exists),
  // and 1 for location.
const bool hasOffsetArg = numArgs - hasStatusArg - textureMS - 1 > 0;
if (isTexture(objectType)) {
    // .Load() has a second optional parameter for offset.
SpirvInstruction *location = doExpr(locationArg);
SpirvInstruction *constOffset = nullptr, *varOffset = nullptr;
SpirvInstruction *coordinate = location, *lod = nullptr;
if (textureMS) {
      // SampleIndex is only available when the Object is of Texture2DMS or
      // Texture2DMSArray type. In those cases, Offset will be the third
      // parameter (index 2).
lod = doExpr(expr->getArg(1));
if (hasOffsetArg)
handleOffsetInMethodCall(expr, 2, &constOffset, &varOffset);
} else {
// For Texture Load() functions, the location parameter is a vector
// that consists of both the coordinate and the mipmap level (via the
// last vector element). We need to split it here since the
// OpImageFetch SPIR-V instruction encodes them as separate arguments.
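      // For example, for Texture2D the Location argument is an int3(x, y,
      // mip); the coordinate becomes int2(x, y) and the lod becomes mip.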
splitVecLastElement(locationArg->getType(), location, &coordinate, &lod,
locationArg->getExprLoc());
// For textures other than Texture2DMS(Array), offset should be the
// second parameter (index 1).
if (hasOffsetArg)
handleOffsetInMethodCall(expr, 1, &constOffset, &varOffset);
}
if (varOffset) {
emitError(
"Offsets to texture access operations must be immediate values.",
object->getExprLoc());
return nullptr;
}
return processBufferTextureLoad(object, coordinate, constOffset, lod,
status, loc, range);
}
emitError("Load() of the given object type unimplemented",
object->getExprLoc());
return nullptr;
}
SpirvInstruction *
SpirvEmitter::processGetDimensions(const CXXMemberCallExpr *expr) {
const auto objectType = expr->getImplicitObjectArgument()->getType();
if (isTexture(objectType) || isRWTexture(objectType) ||
isBuffer(objectType) || isRWBuffer(objectType)) {
return processBufferTextureGetDimensions(expr);
} else if (isByteAddressBuffer(objectType) ||
isRWByteAddressBuffer(objectType) ||
isStructuredBuffer(objectType) ||
isAppendStructuredBuffer(objectType) ||
isConsumeStructuredBuffer(objectType)) {
return processByteAddressBufferStructuredBufferGetDimensions(expr);
} else {
emitError("GetDimensions() of the given object type unimplemented",
expr->getExprLoc());
return nullptr;
}
}
SpirvInstruction *
SpirvEmitter::doCXXOperatorCallExpr(const CXXOperatorCallExpr *expr,
SourceRange rangeOverride) {
{ // Handle Buffer/RWBuffer/Texture/RWTexture indexing
const Expr *baseExpr = nullptr;
const Expr *indexExpr = nullptr;
const Expr *lodExpr = nullptr;
    // For Textures, regular indexing (operator[]) loads from mipmap level 0.
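    // For example, myTex[uint2(x, y)] behaves like
    // myTex.Load(int3(x, y, /*mip*/ 0)) with no status argument.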
if (isBufferTextureIndexing(expr, &baseExpr, &indexExpr)) {
auto *lod = isTexture(baseExpr->getType())
? spvBuilder.getConstantInt(astContext.UnsignedIntTy,
llvm::APInt(32, 0))
: nullptr;
return processBufferTextureLoad(baseExpr, doExpr(indexExpr),
/*constOffset*/ nullptr, lod,
/*residencyCode*/ nullptr,
expr->getExprLoc());
}
    // .mips[][] or .sample[][] must use the mip level or sample index given
    // by the first subscript.
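    // For example, myTex.mips[2][uint2(x, y)] loads from mip level 2, and
    // myMSTex.sample[s][uint2(x, y)] loads sample s.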
if (isTextureMipsSampleIndexing(expr, &baseExpr, &indexExpr, &lodExpr)) {
auto *lod = doExpr(lodExpr);
return processBufferTextureLoad(baseExpr, doExpr(indexExpr),
/*constOffset*/ nullptr, lod,
/*residencyCode*/ nullptr,
expr->getExprLoc());
}
}
SourceRange range =
(rangeOverride != SourceRange()) ? rangeOverride : expr->getSourceRange();
{ // Handle ResourceDescriptorHeap and SamplerDescriptorHeap.
if (isDescriptorHeap(expr)) {
const Expr *baseExpr = nullptr;
const Expr *indexExpr = nullptr;
getDescriptorHeapOperands(expr, &baseExpr, &indexExpr);
const Expr *parentExpr = cast<CastExpr>(parentMap->getParent(expr));
QualType resourceType = parentExpr->getType();
const auto *declRefExpr = dyn_cast<DeclRefExpr>(baseExpr->IgnoreCasts());
auto *decl = cast<VarDecl>(declRefExpr->getDecl());
auto *var = declIdMapper.createResourceHeap(decl, resourceType);
auto *index = doExpr(indexExpr);
auto *accessChainPtr = spvBuilder.createAccessChain(
resourceType, var, index, baseExpr->getExprLoc(), range);
if (!isAKindOfStructuredOrByteBuffer(resourceType) &&
baseExpr->isGLValue())
return spvBuilder.createLoad(resourceType, accessChainPtr,
baseExpr->getExprLoc(), range);
return accessChainPtr;
}
}
llvm::SmallVector<SpirvInstruction *, 4> indices;
const Expr *baseExpr = collectArrayStructIndices(
expr, /*rawIndex*/ false, /*rawIndices*/ nullptr, &indices);
auto base = loadIfAliasVarRef(baseExpr);
if (base == nullptr ||
indices.empty()) // For indexing into size-1 vectors and 1xN matrices
return base;
  // If we are indexing into an rvalue, to use OpAccessChain, we first need
  // to create a local variable to hold the rvalue.
  //
  // TODO: We can optimize the codegen by emitting OpCompositeExtract if
  // all indices are constant integers.
if (base->isRValue()) {
base = createTemporaryVar(baseExpr->getType(), "vector", base,
baseExpr->getExprLoc());
}
return derefOrCreatePointerToValue(baseExpr->getType(), base, expr->getType(),
indices, baseExpr->getExprLoc(), range);
}
SpirvInstruction *
SpirvEmitter::doExtMatrixElementExpr(const ExtMatrixElementExpr *expr) {
const Expr *baseExpr = expr->getBase();
auto *baseInfo = doExpr(baseExpr);
const auto layoutRule = baseInfo->getLayoutRule();
const auto elemType = hlsl::GetHLSLMatElementType(baseExpr->getType());
const auto accessor = expr->getEncodedElementAccess();
uint32_t rowCount = 0, colCount = 0;
hlsl::GetHLSLMatRowColCount(baseExpr->getType(), rowCount, colCount);
// Construct a temporary vector out of all elements accessed:
// 1. Create access chain for each element using OpAccessChain
// 2. Load each element using OpLoad
// 3. Create the vector using OpCompositeConstruct
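  // For example, mat._m00_m11 yields a float2 holding the elements at
  // (row 0, col 0) and (row 1, col 1).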
llvm::SmallVector<SpirvInstruction *, 4> elements;
for (uint32_t i = 0; i < accessor.Count; ++i) {
uint32_t row = 0, col = 0;
SpirvInstruction *elem = nullptr;
accessor.GetPosition(i, &row, &col);
llvm::SmallVector<uint32_t, 2> indices;
    // If the matrix only has one row/column, then we are indexing into a
    // vector. Only one index is needed for such cases.
if (rowCount > 1)
indices.push_back(row);
if (colCount > 1)
indices.push_back(col);
if (!baseInfo->isRValue()) {
llvm::SmallVector<SpirvInstruction *, 2> indexInstructions(indices.size(),
nullptr);
for (uint32_t i = 0; i < indices.size(); ++i)
indexInstructions[i] = spvBuilder.getConstantInt(
astContext.IntTy, llvm::APInt(32, indices[i], true));
if (!indices.empty()) {
assert(!baseInfo->isRValue());
// Load the element via access chain
elem = spvBuilder.createAccessChain(
elemType, baseInfo, indexInstructions, baseExpr->getLocStart());
} else {
// The matrix is of size 1x1. No need to use access chain, base should
// be the source pointer.
elem = baseInfo;
}
elem = spvBuilder.createLoad(elemType, elem, baseExpr->getLocStart());
} else { // e.g., (mat1 + mat2)._m11
elem = spvBuilder.createCompositeExtract(elemType, baseInfo, indices,
baseExpr->getLocStart());
}
elements.push_back(elem);
}
const auto size = elements.size();
auto *value = elements.front();
if (size > 1) {
value = spvBuilder.createCompositeConstruct(
astContext.getExtVectorType(elemType, size), elements,
expr->getLocStart());
}
// Note: Special-case: Booleans have no physical layout, and therefore when
// layout is required booleans are represented as unsigned integers.
  // Therefore, after loading the uint we should convert it to boolean.
if (elemType->isBooleanType() && layoutRule != SpirvLayoutRule::Void) {
const auto fromType =
size == 1 ? astContext.UnsignedIntTy
: astContext.getExtVectorType(astContext.UnsignedIntTy, size);
const auto toType =
size == 1 ? astContext.BoolTy
: astContext.getExtVectorType(astContext.BoolTy, size);
value = castToBool(value, fromType, toType, expr->getLocStart());
}
if (!value)
return nullptr;
value->setRValue();
return value;
}
SpirvInstruction *
SpirvEmitter::doHLSLVectorElementExpr(const HLSLVectorElementExpr *expr,
SourceRange rangeOverride) {
SourceRange range =
(rangeOverride != SourceRange()) ? rangeOverride : expr->getSourceRange();
const Expr *baseExpr = nullptr;
hlsl::VectorMemberAccessPositions accessor;
condenseVectorElementExpr(expr, &baseExpr, &accessor);
const QualType baseType = baseExpr->getType();
assert(hlsl::IsHLSLVecType(baseType));
const auto baseSize = hlsl::GetHLSLVecSize(baseType);
const auto accessorSize = static_cast<size_t>(accessor.Count);
// Depending on the number of elements selected, we emit different
// instructions.
// For vectors of size greater than 1, if we are only selecting one element,
  // a typical access chain or composite extraction should be fine. But if we
  // are selecting more than one element, we must resort to vector-specific
  // operations.
  // For size-1 vectors, if we are selecting the single element multiple
// times, we need composite construct instructions.
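  // For example, v.y on a float4 lvalue becomes an OpAccessChain, (v + w).x
  // becomes an OpCompositeExtract, and s.xx on a scalar s becomes an
  // OpCompositeConstruct of two copies of s.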
if (accessorSize == 1) {
auto *baseInfo = doExpr(baseExpr, range);
if (!baseInfo || baseSize == 1) {
// Selecting one element from a size-1 vector. The underlying vector is
// already treated as a scalar.
return baseInfo;
}
// If the base is an lvalue, we should emit an access chain instruction
// so that we can load/store the specified element. For rvalue base,
// we should use composite extraction. We should check the immediate base
// instead of the original base here since we can have something like
  // v.xyyz to turn an lvalue v into an rvalue.
const auto type = expr->getType();
if (!baseInfo->isRValue()) { // E.g., v.x;
auto *index = spvBuilder.getConstantInt(
astContext.IntTy, llvm::APInt(32, accessor.Swz0, true));
// We need a lvalue here. Do not try to load.
return spvBuilder.createAccessChain(type, baseInfo, {index},
baseExpr->getLocStart(), range);
} else { // E.g., (v + w).x;
      // The original base vector may not be an rvalue. We need to load it if
      // it is an lvalue, since the ImplicitCastExpr (LValueToRValue) will be
      // missing in that case.
SpirvInstruction *result = spvBuilder.createCompositeExtract(
type, baseInfo, {accessor.Swz0}, baseExpr->getLocStart(), range);
// Special-case: Booleans in SPIR-V do not have a physical layout. Uint is
// used to represent them when layout is required.
if (expr->getType()->isBooleanType() &&
baseInfo->getLayoutRule() != SpirvLayoutRule::Void)
result = castToBool(result, astContext.UnsignedIntTy, astContext.BoolTy,
expr->getLocStart(), range);
return result;
}
}
if (baseSize == 1) {
// Selecting more than one element from a size-1 vector, for example,
// <scalar>.xx. Construct the vector.
auto *info = loadIfGLValue(baseExpr, range);
const auto type = expr->getType();
llvm::SmallVector<SpirvInstruction *, 4> components(accessorSize, info);
info = spvBuilder.createCompositeConstruct(type, components,
expr->getLocStart(), range);
if (!info)
return nullptr;
info->setRValue();
return info;
}
llvm::SmallVector<uint32_t, 4> selectors;
selectors.resize(accessorSize);
// Whether we are selecting elements in the original order
bool originalOrder = baseSize == accessorSize;
for (uint32_t i = 0; i < accessorSize; ++i) {
accessor.GetPosition(i, &selectors[i]);
// We can select more elements than the vector provides. This handles
// that case too.
originalOrder &= selectors[i] == i;
}
auto *info = loadIfGLValue(baseExpr, range);
if (originalOrder) {
// If the elements are simply the original vector, then return it without a
// vector shuffle.
return info;
}
// Use base for both vectors. But we are only selecting values from the
// first one.
return spvBuilder.createVectorShuffle(expr->getType(), info, info, selectors,
expr->getLocStart(), range);
}
SpirvInstruction *SpirvEmitter::doInitListExpr(const InitListExpr *expr,
SourceRange rangeOverride) {
if (auto *id =
constEvaluator.tryToEvaluateAsConst(expr, isSpecConstantMode)) {
id->setRValue();
return id;
}
SourceRange range =
(rangeOverride != SourceRange()) ? rangeOverride : expr->getSourceRange();
auto *result = InitListHandler(astContext, *this).processInit(expr, range);
if (result == nullptr) {
return nullptr;
}
result->setRValue();
return result;
}
bool SpirvEmitter::isNoInterpMemberExpr(const MemberExpr *expr) {
bool ret = false;
FieldDecl *D = dyn_cast<FieldDecl>(expr->getMemberDecl());
while (D != nullptr) {
if (D->hasAttr<HLSLNoInterpolationAttr>() ||
D->getParent()->hasAttr<HLSLNoInterpolationAttr>()) {
ret = true;
}
D = dyn_cast<FieldDecl>(D->getParent());
}
auto *base = dyn_cast<MemberExpr>(expr->getBase());
return ret || (base != nullptr && isNoInterpMemberExpr(base));
}
SpirvInstruction *SpirvEmitter::doMemberExpr(const MemberExpr *expr,
SourceRange rangeOverride) {
llvm::SmallVector<SpirvInstruction *, 4> indices;
const Expr *base = collectArrayStructIndices(
expr, /*rawIndex*/ false, /*rawIndices*/ nullptr, &indices);
const SourceRange &range =
(rangeOverride != SourceRange()) ? rangeOverride : expr->getSourceRange();
auto *instr = loadIfAliasVarRef(base, range);
const auto &loc = base->getExprLoc();
if (!instr || indices.empty()) {
return instr;
}
const auto *fieldDecl = dyn_cast<FieldDecl>(expr->getMemberDecl());
if (!fieldDecl || !fieldDecl->isBitField()) {
SpirvInstruction *retInstr = derefOrCreatePointerToValue(
base->getType(), instr, expr->getType(), indices, loc, range);
if (isNoInterpMemberExpr(expr))
retInstr->setNoninterpolated();
return retInstr;
}
auto baseType = expr->getBase()->getType();
if (baseType->isPointerType()) {
baseType = baseType->getPointeeType();
}
const uint32_t indexAST =
getNumBaseClasses(baseType) + fieldDecl->getFieldIndex();
LowerTypeVisitor lowerTypeVisitor(astContext, spvContext, spirvOptions,
spvBuilder);
const StructType *spirvStructType =
lowerStructType(spirvOptions, lowerTypeVisitor, baseType);
assert(spirvStructType);
const uint32_t bitfieldOffset =
spirvStructType->getFields()[indexAST].bitfield->offsetInBits;
const uint32_t bitfieldSize =
spirvStructType->getFields()[indexAST].bitfield->sizeInBits;
BitfieldInfo bitfieldInfo{bitfieldOffset, bitfieldSize};
if (instr->isRValue()) {
SpirvVariable *variable = turnIntoLValue(base->getType(), instr, loc);
SpirvInstruction *chain = spvBuilder.createAccessChain(
expr->getType(), variable, indices, loc, range);
chain->setBitfieldInfo(bitfieldInfo);
return spvBuilder.createLoad(expr->getType(), chain, loc);
}
SpirvInstruction *chain =
spvBuilder.createAccessChain(expr->getType(), instr, indices, loc, range);
chain->setBitfieldInfo(bitfieldInfo);
return chain;
}
SpirvVariable *SpirvEmitter::createTemporaryVar(QualType type,
llvm::StringRef name,
SpirvInstruction *init,
SourceLocation loc) {
// We are creating a temporary variable in the Function storage class here,
// which means it has void layout rule.
const std::string varName = "temp.var." + name.str();
auto *var = spvBuilder.addFnVar(type, loc, varName);
storeValue(var, init, type, loc);
return var;
}
SpirvInstruction *SpirvEmitter::doUnaryOperator(const UnaryOperator *expr) {
const auto opcode = expr->getOpcode();
const auto *subExpr = expr->getSubExpr();
const auto subType = subExpr->getType();
auto *subValue = doExpr(subExpr);
SourceRange range = expr->getSourceRange();
switch (opcode) {
case UO_PreInc:
case UO_PreDec:
case UO_PostInc:
case UO_PostDec: {
const bool isPre = opcode == UO_PreInc || opcode == UO_PreDec;
const bool isInc = opcode == UO_PreInc || opcode == UO_PostInc;
const spv::Op spvOp = translateOp(isInc ? BO_Add : BO_Sub, subType);
SpirvInstruction *originValue =
subValue->isRValue()
? subValue
: spvBuilder.createLoad(subType, subValue, subExpr->getLocStart(),
range);
auto *one = hlsl::IsHLSLMatType(subType) ? getMatElemValueOne(subType)
: getValueOne(subType);
SpirvInstruction *incValue = nullptr;
if (isMxNMatrix(subType)) {
// For matrices, we can only increment/decrement each vector of it.
const auto actOnEachVec = [this, spvOp, one, expr,
range](uint32_t /*index*/, QualType inType,
QualType outType,
SpirvInstruction *lhsVec) {
auto *val = spvBuilder.createBinaryOp(spvOp, outType, lhsVec, one,
expr->getOperatorLoc(), range);
if (val)
val->setRValue();
return val;
};
incValue = processEachVectorInMatrix(subExpr, originValue, actOnEachVec,
expr->getLocStart(), range);
} else {
incValue = spvBuilder.createBinaryOp(spvOp, subType, originValue, one,
expr->getOperatorLoc(), range);
}
// If this is a RWBuffer/RWTexture assignment, OpImageWrite will be used.
// Otherwise, store using OpStore.
if (tryToAssignToRWBufferRWTexture(subExpr, incValue, range)) {
if (!incValue)
return nullptr;
incValue->setRValue();
subValue = incValue;
} else {
spvBuilder.createStore(subValue, incValue, subExpr->getLocStart(), range);
}
    // Prefix increment/decrement operators return an lvalue, while postfix
    // increment/decrement operators return an rvalue.
if (isPre) {
return subValue;
}
if (!originValue)
return nullptr;
originValue->setRValue();
return originValue;
}
case UO_Not: {
subValue = spvBuilder.createUnaryOp(spv::Op::OpNot, subType, subValue,
expr->getOperatorLoc(), range);
if (!subValue)
return nullptr;
subValue->setRValue();
return subValue;
}
case UO_LNot: {
// Parsing will do the necessary casting to make sure we are applying the
// ! operator on boolean values.
subValue =
spvBuilder.createUnaryOp(spv::Op::OpLogicalNot, subType, subValue,
expr->getOperatorLoc(), range);
if (!subValue)
return nullptr;
subValue->setRValue();
return subValue;
}
case UO_Plus:
// No need to do anything for the prefix + operator.
return subValue;
case UO_Minus: {
    // SPIR-V has two opcodes for negating values: OpSNegate and OpFNegate.
const spv::Op spvOp = isFloatOrVecMatOfFloatType(subType)
? spv::Op::OpFNegate
: spv::Op::OpSNegate;
if (isMxNMatrix(subType)) {
// For matrices, we can only negate each vector of it.
const auto actOnEachVec = [this, spvOp, expr,
range](uint32_t /*index*/, QualType inType,
QualType outType,
SpirvInstruction *lhsVec) {
return spvBuilder.createUnaryOp(spvOp, outType, lhsVec,
expr->getOperatorLoc(), range);
};
return processEachVectorInMatrix(subExpr, subValue, actOnEachVec,
expr->getLocStart(), range);
} else {
subValue = spvBuilder.createUnaryOp(spvOp, subType, subValue,
expr->getOperatorLoc(), range);
if (!subValue)
return nullptr;
subValue->setRValue();
return subValue;
}
}
default:
break;
}
emitError("unary operator '%0' unimplemented", expr->getExprLoc())
<< expr->getOpcodeStr(opcode);
expr->dump();
  return nullptr;
}
spv::Op SpirvEmitter::translateOp(BinaryOperator::Opcode op, QualType type) {
const bool isSintType = isSintOrVecMatOfSintType(type);
const bool isUintType = isUintOrVecMatOfUintType(type);
const bool isFloatType = isFloatOrVecMatOfFloatType(type);
#define BIN_OP_CASE_INT_FLOAT(kind, intBinOp, floatBinOp) \
\
case BO_##kind: { \
if (isSintType || isUintType) { \
return spv::Op::Op##intBinOp; \
} \
if (isFloatType) { \
return spv::Op::Op##floatBinOp; \
} \
} break
#define BIN_OP_CASE_SINT_UINT_FLOAT(kind, sintBinOp, uintBinOp, floatBinOp) \
\
case BO_##kind: { \
if (isSintType) { \
return spv::Op::Op##sintBinOp; \
} \
if (isUintType) { \
return spv::Op::Op##uintBinOp; \
} \
if (isFloatType) { \
return spv::Op::Op##floatBinOp; \
} \
} break
#define BIN_OP_CASE_SINT_UINT(kind, sintBinOp, uintBinOp) \
\
case BO_##kind: { \
if (isSintType) { \
return spv::Op::Op##sintBinOp; \
} \
if (isUintType) { \
return spv::Op::Op##uintBinOp; \
} \
} break
switch (op) {
case BO_EQ: {
if (isBoolOrVecMatOfBoolType(type))
return spv::Op::OpLogicalEqual;
if (isSintType || isUintType)
return spv::Op::OpIEqual;
if (isFloatType)
return spv::Op::OpFOrdEqual;
} break;
case BO_NE: {
if (isBoolOrVecMatOfBoolType(type))
return spv::Op::OpLogicalNotEqual;
if (isSintType || isUintType)
return spv::Op::OpINotEqual;
if (isFloatType)
return spv::Op::OpFOrdNotEqual;
} break;
// Up until HLSL 2021, all sides of the && and || expression are always
// evaluated.
case BO_LAnd:
return spv::Op::OpLogicalAnd;
case BO_LOr:
return spv::Op::OpLogicalOr;
BIN_OP_CASE_INT_FLOAT(Add, IAdd, FAdd);
BIN_OP_CASE_INT_FLOAT(AddAssign, IAdd, FAdd);
BIN_OP_CASE_INT_FLOAT(Sub, ISub, FSub);
BIN_OP_CASE_INT_FLOAT(SubAssign, ISub, FSub);
BIN_OP_CASE_INT_FLOAT(Mul, IMul, FMul);
BIN_OP_CASE_INT_FLOAT(MulAssign, IMul, FMul);
BIN_OP_CASE_SINT_UINT_FLOAT(Div, SDiv, UDiv, FDiv);
BIN_OP_CASE_SINT_UINT_FLOAT(DivAssign, SDiv, UDiv, FDiv);
// According to HLSL spec, "the modulus operator returns the remainder of
// a division." "The % operator is defined only in cases where either both
// sides are positive or both sides are negative."
//
  // In SPIR-V, there are two remainder operations: Op*Rem and Op*Mod. With
// the former, the sign of a non-0 result comes from Operand 1, while
// with the latter, from Operand 2.
//
// For operands with different signs, technically we can map % to either
// Op*Rem or Op*Mod since it's undefined behavior. But it is more
// consistent with C (HLSL starts as a C derivative) and Clang frontend
// const expression evaluation if we map % to Op*Rem.
//
// Note there is no OpURem in SPIR-V.
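  // For example, with C-style truncating division -7 % 3 == -1 (the sign
  // follows the dividend), which matches OpSRem; OpSMod would yield 2.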
BIN_OP_CASE_SINT_UINT_FLOAT(Rem, SRem, UMod, FRem);
BIN_OP_CASE_SINT_UINT_FLOAT(RemAssign, SRem, UMod, FRem);
BIN_OP_CASE_SINT_UINT_FLOAT(LT, SLessThan, ULessThan, FOrdLessThan);
BIN_OP_CASE_SINT_UINT_FLOAT(LE, SLessThanEqual, ULessThanEqual,
FOrdLessThanEqual);
BIN_OP_CASE_SINT_UINT_FLOAT(GT, SGreaterThan, UGreaterThan,
FOrdGreaterThan);
BIN_OP_CASE_SINT_UINT_FLOAT(GE, SGreaterThanEqual, UGreaterThanEqual,
FOrdGreaterThanEqual);
BIN_OP_CASE_SINT_UINT(And, BitwiseAnd, BitwiseAnd);
BIN_OP_CASE_SINT_UINT(AndAssign, BitwiseAnd, BitwiseAnd);
BIN_OP_CASE_SINT_UINT(Or, BitwiseOr, BitwiseOr);
BIN_OP_CASE_SINT_UINT(OrAssign, BitwiseOr, BitwiseOr);
BIN_OP_CASE_SINT_UINT(Xor, BitwiseXor, BitwiseXor);
BIN_OP_CASE_SINT_UINT(XorAssign, BitwiseXor, BitwiseXor);
BIN_OP_CASE_SINT_UINT(Shl, ShiftLeftLogical, ShiftLeftLogical);
BIN_OP_CASE_SINT_UINT(ShlAssign, ShiftLeftLogical, ShiftLeftLogical);
BIN_OP_CASE_SINT_UINT(Shr, ShiftRightArithmetic, ShiftRightLogical);
BIN_OP_CASE_SINT_UINT(ShrAssign, ShiftRightArithmetic, ShiftRightLogical);
default:
break;
}
#undef BIN_OP_CASE_INT_FLOAT
#undef BIN_OP_CASE_SINT_UINT_FLOAT
#undef BIN_OP_CASE_SINT_UINT
emitError("translating binary operator '%0' unimplemented", {})
<< BinaryOperator::getOpcodeStr(op);
return spv::Op::OpNop;
}
SpirvInstruction *
SpirvEmitter::processAssignment(const Expr *lhs, SpirvInstruction *rhs,
const bool isCompoundAssignment,
SpirvInstruction *lhsPtr, SourceRange range) {
lhs = lhs->IgnoreParenNoopCasts(astContext);
// Assigning to vector swizzling should be handled differently.
if (SpirvInstruction *result = tryToAssignToVectorElements(lhs, rhs, range))
return result;
// Assigning to matrix swizzling should be handled differently.
if (SpirvInstruction *result = tryToAssignToMatrixElements(lhs, rhs, range))
return result;
// Assigning to a RWBuffer/RWTexture should be handled differently.
if (SpirvInstruction *result =
tryToAssignToRWBufferRWTexture(lhs, rhs, range))
return result;
// Assigning to a out attribute or indices object in mesh shader should be
// handled differently.
if (SpirvInstruction *result = tryToAssignToMSOutAttrsOrIndices(lhs, rhs))
return result;
// Assigning to a 'string' variable. SPIR-V doesn't have a string type, and we
// do not allow creating or modifying string variables. We do allow use of
// string literals using OpString.
if (isStringType(lhs->getType())) {
emitError("string variables are immutable in SPIR-V.", lhs->getExprLoc());
return nullptr;
}
// Normal assignment procedure
if (!lhsPtr)
lhsPtr = doExpr(lhs, range);
storeValue(lhsPtr, rhs, lhs->getType(), lhs->getLocStart(), range);
// Plain assignment returns a rvalue, while compound assignment returns
// lvalue.
return isCompoundAssignment ? lhsPtr : rhs;
}
void SpirvEmitter::storeValue(SpirvInstruction *lhsPtr,
SpirvInstruction *rhsVal, QualType lhsValType,
SourceLocation loc, SourceRange range) {
// Defend against nullptr source or destination so errors can bubble up to the
// user.
if (!lhsPtr || !rhsVal)
return;
if (const auto *refType = lhsValType->getAs<ReferenceType>())
lhsValType = refType->getPointeeType();
QualType matElemType = {};
const bool lhsIsMat = isMxNMatrix(lhsValType, &matElemType);
const bool lhsIsFloatMat = lhsIsMat && matElemType->isFloatingType();
const bool lhsIsNonFpMat = lhsIsMat && !matElemType->isFloatingType();
if (isScalarType(lhsValType) || isVectorType(lhsValType) || lhsIsFloatMat) {
// Special-case: According to the SPIR-V Spec: There is no physical size
// or bit pattern defined for boolean type. Therefore an unsigned integer
// is used to represent booleans when layout is required. In such cases,
// we should cast the boolean to uint before creating OpStore.
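    // For example, storing a bool2 into a cbuffer member effectively stores
    // a uint2 whose components are 0 or 1.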
if (isBoolOrVecOfBoolType(lhsValType) &&
lhsPtr->getLayoutRule() != SpirvLayoutRule::Void) {
uint32_t vecSize = 1;
const bool isVec = isVectorType(lhsValType, nullptr, &vecSize);
const auto toType =
isVec ? astContext.getExtVectorType(astContext.UnsignedIntTy, vecSize)
: astContext.UnsignedIntTy;
const auto fromType =
isVec ? astContext.getExtVectorType(astContext.BoolTy, vecSize)
: astContext.BoolTy;
rhsVal = castToInt(rhsVal, fromType, toType, rhsVal->getSourceLocation(),
rhsVal->getSourceRange());
}
spvBuilder.createStore(lhsPtr, rhsVal, loc, range);
} else if (isOpaqueType(lhsValType)) {
// Resource types are represented using RecordType in the AST.
// Handle them before the general RecordType.
//
    // HLSL allows putting resource types, which translate into SPIR-V opaque
    // types, in structs, and allows assigning to variables of resource types.
    // These can all result in illegal SPIR-V for Vulkan. We just translate
    // literally here and let SPIRV-Tools opt do the legalization work.
//
// Note: legalization specific code
spvBuilder.createStore(lhsPtr, rhsVal, loc, range);
needsLegalization = true;
} else if (isAKindOfStructuredOrByteBuffer(lhsValType)) {
// The rhs should be a pointer and the lhs should be a pointer-to-pointer.
    // Directly store the pointer here and let SPIRV-Tools opt do the
    // cleanup.
//
// Note: legalization specific code
spvBuilder.createStore(lhsPtr, rhsVal, loc, range);
needsLegalization = true;
// For ConstantBuffers/TextureBuffers, we decompose and assign each field
// recursively like normal structs using the following logic.
//
// The frontend forbids declaring ConstantBuffer<T> or TextureBuffer<T>
// variables as function parameters/returns/variables, but happily accepts
// assignments/returns from ConstantBuffer<T>/TextureBuffer<T> to function
    // parameters/returns/variables of type T. And ConstantBuffer<T> is not
    // represented differently from struct T.
} else if (isOpaqueArrayType(lhsValType)) {
// SPIRV-Tools can handle legalization of the store in these cases.
if (!lhsValType->isConstantArrayType() || rhsVal->isRValue()) {
spvBuilder.createStore(lhsPtr, rhsVal, loc, range);
needsLegalization = true;
return;
}
// For opaque array types, we cannot perform OpLoad on the whole array and
// then write out as a whole; instead, we need to OpLoad each element
// using access chains. This is to influence later SPIR-V transformations
    // to use access chains to access each opaque object; if we handled the
    // array wholesale here, whole-array operations would remain in the final
    // transformed code, which drivers don't like.
const auto *arrayType = astContext.getAsConstantArrayType(lhsValType);
const auto elemType = arrayType->getElementType();
const auto arraySize =
static_cast<uint32_t>(arrayType->getSize().getZExtValue());
// Do separate load of each element via access chain
llvm::SmallVector<SpirvInstruction *, 8> elements;
for (uint32_t i = 0; i < arraySize; ++i) {
auto *subRhsPtr = spvBuilder.createAccessChain(
elemType, rhsVal,
{spvBuilder.getConstantInt(astContext.IntTy,
llvm::APInt(32, i, true))},
loc);
elements.push_back(
spvBuilder.createLoad(elemType, subRhsPtr, loc, range));
}
// Create a new composite and write out once
spvBuilder.createStore(
lhsPtr,
spvBuilder.createCompositeConstruct(lhsValType, elements,
rhsVal->getSourceLocation(), range),
loc, range);
} else if (lhsPtr->getLayoutRule() == rhsVal->getLayoutRule()) {
    // If lhs and rhs have the same memory layout, we should be safe to load
// from rhs and directly store into lhs and avoid decomposing rhs.
// Note: this check should happen after those setting needsLegalization.
// TODO: is this optimization always correct?
spvBuilder.createStore(lhsPtr, rhsVal, loc, range);
} else if (lhsValType->isRecordType() || lhsValType->isConstantArrayType() ||
lhsIsNonFpMat) {
spvBuilder.createStore(lhsPtr,
reconstructValue(rhsVal, lhsValType,
lhsPtr->getLayoutRule(), loc,
range),
loc, range);
} else {
emitError("storing value of type %0 unimplemented", {}) << lhsValType;
}
}
SpirvInstruction *SpirvEmitter::reconstructValue(SpirvInstruction *srcVal,
const QualType valType,
SpirvLayoutRule dstLR,
SourceLocation loc,
SourceRange range) {
// Lambda for casting scalar or vector of bool<-->uint in cases where one side
// of the reconstruction (lhs or rhs) has a layout rule.
const auto handleBooleanLayout = [this, &srcVal, dstLR, loc,
range](SpirvInstruction *val,
QualType valType) {
// We only need to cast if we have a scalar or vector of booleans.
if (!isBoolOrVecOfBoolType(valType))
return val;
SpirvLayoutRule srcLR = srcVal->getLayoutRule();
// Source value has a layout rule, and has therefore been represented
// as a uint. Cast it to boolean before using.
bool shouldCastToBool =
srcLR != SpirvLayoutRule::Void && dstLR == SpirvLayoutRule::Void;
// Destination has a layout rule, and should therefore be represented
// as a uint. Cast to uint before using.
bool shouldCastToUint =
srcLR == SpirvLayoutRule::Void && dstLR != SpirvLayoutRule::Void;
// No boolean layout issues to take care of.
if (!shouldCastToBool && !shouldCastToUint)
return val;
uint32_t vecSize = 1;
isVectorType(valType, nullptr, &vecSize);
QualType boolType =
vecSize == 1 ? astContext.BoolTy
: astContext.getExtVectorType(astContext.BoolTy, vecSize);
QualType uintType =
vecSize == 1
? astContext.UnsignedIntTy
: astContext.getExtVectorType(astContext.UnsignedIntTy, vecSize);
if (shouldCastToBool)
return castToBool(val, uintType, boolType, loc, range);
if (shouldCastToUint)
return castToInt(val, boolType, uintType, loc, range);
return val;
};
// Lambda for cases where we want to reconstruct an array
const auto reconstructArray = [this, &srcVal, valType, dstLR, loc,
range](uint32_t arraySize,
QualType arrayElemType) {
llvm::SmallVector<SpirvInstruction *, 4> elements;
for (uint32_t i = 0; i < arraySize; ++i) {
SpirvInstruction *subSrcVal = spvBuilder.createCompositeExtract(
arrayElemType, srcVal, {i}, loc, range);
subSrcVal->setLayoutRule(srcVal->getLayoutRule());
elements.push_back(
reconstructValue(subSrcVal, arrayElemType, dstLR, loc, range));
}
auto *result = spvBuilder.createCompositeConstruct(
valType, elements, srcVal->getSourceLocation(), range);
result->setLayoutRule(dstLR);
return result;
};
// Constant arrays
if (const auto *arrayType = astContext.getAsConstantArrayType(valType)) {
const auto elemType = arrayType->getElementType();
const auto size =
static_cast<uint32_t>(arrayType->getSize().getZExtValue());
return reconstructArray(size, elemType);
}
// Non-floating-point matrices
QualType matElemType = {};
uint32_t numRows = 0, numCols = 0;
const bool isNonFpMat =
isMxNMatrix(valType, &matElemType, &numRows, &numCols) &&
!matElemType->isFloatingType();
if (isNonFpMat) {
// Note: This check should happen before the RecordType check.
// Non-fp matrices are represented as arrays of vectors in SPIR-V.
// Each array element is a vector. Get the QualType for the vector.
const auto elemType = astContext.getExtVectorType(matElemType, numCols);
return reconstructArray(numRows, elemType);
}
// Note: This check should happen before the RecordType check since
// vector/matrix/resource types are represented as RecordType in the AST.
if (hlsl::IsHLSLVecMatType(valType) || hlsl::IsHLSLResourceType(valType))
return handleBooleanLayout(srcVal, valType);
// Structs
if (const auto *recordType = valType->getAs<RecordType>()) {
assert(recordType->isStructureType());
LowerTypeVisitor lowerTypeVisitor(astContext, spvContext, spirvOptions,
spvBuilder);
const StructType *spirvStructType =
lowerStructType(spirvOptions, lowerTypeVisitor, recordType->desugar());
llvm::SmallVector<SpirvInstruction *, 4> elements;
forEachSpirvField(
recordType, spirvStructType,
[&](size_t spirvFieldIndex, const QualType &fieldType,
const auto &field) {
SpirvInstruction *subSrcVal = spvBuilder.createCompositeExtract(
fieldType, srcVal, {static_cast<uint32_t>(spirvFieldIndex)}, loc,
range);
subSrcVal->setLayoutRule(srcVal->getLayoutRule());
elements.push_back(
reconstructValue(subSrcVal, fieldType, dstLR, loc, range));
return true;
});
auto *result = spvBuilder.createCompositeConstruct(
valType, elements, srcVal->getSourceLocation(), range);
result->setLayoutRule(dstLR);
return result;
}
return handleBooleanLayout(srcVal, valType);
}
SpirvInstruction *SpirvEmitter::processBinaryOp(
const Expr *lhs, const Expr *rhs, const BinaryOperatorKind opcode,
const QualType computationType, const QualType resultType,
SourceRange sourceRange, SourceLocation loc, SpirvInstruction **lhsInfo,
const spv::Op mandateGenOpcode) {
const QualType lhsType = lhs->getType();
const QualType rhsType = rhs->getType();
// If the operands are of matrix type, we need to dispatch the operation
  // onto each element vector iff the operands are not degenerate matrices
// and we don't have a matrix specific SPIR-V instruction for the operation.
if (!isSpirvMatrixOp(mandateGenOpcode) && isMxNMatrix(lhsType)) {
return processMatrixBinaryOp(lhs, rhs, opcode, sourceRange, loc);
}
// Comma operator works differently from other binary operations as there is
// no SPIR-V instruction for it. For each comma, we must evaluate lhs and rhs
  // respectively, and return the result of rhs.
if (opcode == BO_Comma) {
(void)doExpr(lhs);
return doExpr(rhs);
}
// Beginning with HLSL 2021, logical operators are short-circuited,
// and can only be used with scalar types.
if ((opcode == BO_LAnd || opcode == BO_LOr) &&
getCompilerInstance().getLangOpts().HLSLVersion >= hlsl::LangStd::v2021) {
// We translate short-circuited operators as follows:
// A && B =>
// result = false;
// if (A)
// result = B;
//
// A || B =>
// result = true;
// if (!A)
// result = B;
SpirvInstruction *lhsVal = loadIfGLValue(lhs);
if (lhsVal == nullptr) {
return nullptr;
}
SpirvInstruction *cond = castToBool(lhsVal, lhs->getType(),
astContext.BoolTy, lhs->getExprLoc());
auto *tempVar =
spvBuilder.addFnVar(astContext.BoolTy, loc, "temp.var.logical");
auto *thenBB = spvBuilder.createBasicBlock("logical.lhs.cond");
auto *mergeBB = spvBuilder.createBasicBlock("logical.merge");
if (opcode == BO_LAnd) {
spvBuilder.createStore(tempVar, spvBuilder.getConstantBool(false), loc,
sourceRange);
} else {
spvBuilder.createStore(tempVar, spvBuilder.getConstantBool(true), loc,
sourceRange);
cond = spvBuilder.createUnaryOp(spv::Op::OpLogicalNot, astContext.BoolTy,
cond, lhs->getExprLoc());
}
// Create the branch instruction. This will end the current basic block.
spvBuilder.createConditionalBranch(cond, thenBB, mergeBB, lhs->getExprLoc(),
mergeBB);
spvBuilder.addSuccessor(thenBB);
spvBuilder.setMergeTarget(mergeBB);
// Handle the then branch.
spvBuilder.setInsertPoint(thenBB);
SpirvInstruction *rhsVal = loadIfGLValue(rhs);
if (rhsVal == nullptr) {
return nullptr;
}
SpirvInstruction *rhsBool = castToBool(
rhsVal, rhs->getType(), astContext.BoolTy, rhs->getExprLoc());
spvBuilder.createStore(tempVar, rhsBool, rhs->getExprLoc());
spvBuilder.createBranch(mergeBB, rhs->getExprLoc());
spvBuilder.addSuccessor(mergeBB);
// From now on, emit instructions into the merge block.
spvBuilder.setInsertPoint(mergeBB);
SpirvInstruction *result =
castToType(tempVar, astContext.BoolTy, resultType, loc, sourceRange);
result = spvBuilder.createLoad(resultType, tempVar, loc, sourceRange);
if (!result)
return nullptr;
result->setRValue();
return result;
}
SpirvInstruction *rhsVal = nullptr, *lhsPtr = nullptr, *lhsVal = nullptr;
if (BinaryOperator::isCompoundAssignmentOp(opcode)) {
    // Evaluate rhs before lhs
rhsVal = loadIfGLValue(rhs);
lhsVal = lhsPtr = doExpr(lhs);
// This is a compound assignment. We need to load the lhs value if lhs
    // is not already an rvalue and does not generate a vector shuffle.
if (!lhsPtr->isRValue() && !isVectorShuffle(lhs)) {
lhsVal = loadIfGLValue(lhs, lhsPtr);
}
    // For compound assignments, the AST does not have the proper implicit
// cast if lhs and rhs have different types. So we need to manually cast lhs
// to the computation type.
if (computationType != lhsType)
lhsVal = castToType(lhsVal, lhsType, computationType, lhs->getExprLoc());
} else {
    // Evaluate lhs before rhs
lhsPtr = doExpr(lhs);
if (!lhsPtr)
return nullptr;
lhsVal = loadIfGLValue(lhs, lhsPtr);
rhsVal = loadIfGLValue(rhs);
}
if (lhsInfo)
*lhsInfo = lhsPtr;
const spv::Op spvOp = (mandateGenOpcode == spv::Op::Max)
? translateOp(opcode, computationType)
: mandateGenOpcode;
switch (opcode) {
case BO_Shl:
case BO_Shr:
case BO_ShlAssign:
case BO_ShrAssign:
    // We need to mask the RHS to make sure that we are not shifting by an
    // amount that is larger than the bitwidth of the LHS.
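    // For example, for a 32-bit LHS the shift amount is ANDed with 31, so
    // only its low five bits are used.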
rhsVal = spvBuilder.createBinaryOp(spv::Op::OpBitwiseAnd, computationType,
rhsVal, getMaskForBitwidthValue(rhsType),
loc, sourceRange);
LLVM_FALLTHROUGH;
case BO_Add:
case BO_Sub:
case BO_Mul:
case BO_Div:
case BO_Rem:
case BO_LT:
case BO_LE:
case BO_GT:
case BO_GE:
case BO_EQ:
case BO_NE:
case BO_And:
case BO_Or:
case BO_Xor:
case BO_LAnd:
case BO_LOr:
case BO_AddAssign:
case BO_SubAssign:
case BO_MulAssign:
case BO_DivAssign:
case BO_RemAssign:
case BO_AndAssign:
case BO_OrAssign:
case BO_XorAssign: {
if (lhsVal == nullptr || rhsVal == nullptr)
return nullptr;
// To evaluate this expression as an OpSpecConstantOp, we need to make sure
// both operands are constant and at least one of them is a spec constant.
if (SpirvConstant *lhsValConstant = dyn_cast<SpirvConstant>(lhsVal)) {
if (SpirvConstant *rhsValConstant = dyn_cast<SpirvConstant>(rhsVal)) {
if (isAcceptedSpecConstantBinaryOp(spvOp)) {
if (lhsValConstant->isSpecConstant() ||
rhsValConstant->isSpecConstant()) {
auto *val = spvBuilder.createSpecConstantBinaryOp(
spvOp, resultType, lhsVal, rhsVal, loc);
if (!val)
return nullptr;
val->setRValue();
return val;
}
}
}
}
// Normal binary operation
SpirvInstruction *val = nullptr;
if (BinaryOperator::isCompoundAssignmentOp(opcode)) {
val = spvBuilder.createBinaryOp(spvOp, computationType, lhsVal, rhsVal,
loc, sourceRange);
      // For compound assignments, the AST does not have the proper implicit
// cast if lhs and rhs have different types. So we need to manually cast
// the result back to lhs' type.
if (computationType != lhsType)
val = castToType(val, computationType, lhsType, lhs->getExprLoc());
} else {
val = spvBuilder.createBinaryOp(spvOp, resultType, lhsVal, rhsVal, loc,
sourceRange);
}
if (!val)
return nullptr;
val->setRValue();
// Propagate RelaxedPrecision
if ((lhsVal && lhsVal->isRelaxedPrecision()) ||
(rhsVal && rhsVal->isRelaxedPrecision()))
val->setRelaxedPrecision();
return val;
}
case BO_Assign:
llvm_unreachable("assignment should not be handled here");
break;
case BO_PtrMemD:
case BO_PtrMemI:
case BO_Comma:
// Unimplemented
break;
}
emitError("binary operator '%0' unimplemented", lhs->getExprLoc())
<< BinaryOperator::getOpcodeStr(opcode) << sourceRange;
return nullptr;
}
void SpirvEmitter::initOnce(QualType varType, std::string varName,
SpirvVariable *var, const Expr *varInit) {
  // For uninitialized resource objects, we do nothing since there are no
  // meaningful zero values for them.
if (!varInit && hlsl::IsHLSLResourceType(varType))
return;
varName = "init.done." + varName;
auto loc = varInit ? varInit->getLocStart() : SourceLocation();
// Create a file/module visible variable to hold the initialization state.
SpirvVariable *initDoneVar = spvBuilder.addModuleVar(
astContext.BoolTy, spv::StorageClass::Private, /*isPrecise*/ false, false,
varName, spvBuilder.getConstantBool(false));
auto *condition = spvBuilder.createLoad(astContext.BoolTy, initDoneVar, loc);
auto *todoBB = spvBuilder.createBasicBlock("if.init.todo");
auto *doneBB = spvBuilder.createBasicBlock("if.init.done");
// If initDoneVar contains true, we jump to the "done" basic block; otherwise,
// jump to the "todo" basic block.
spvBuilder.createConditionalBranch(condition, doneBB, todoBB, loc, doneBB);
spvBuilder.addSuccessor(todoBB);
spvBuilder.addSuccessor(doneBB);
spvBuilder.setMergeTarget(doneBB);
spvBuilder.setInsertPoint(todoBB);
// Do initialization and mark done
if (varInit) {
var->setStorageClass(spv::StorageClass::Private);
storeValue(
        // Static function variables are of private storage class
var, loadIfGLValue(varInit), varInit->getType(), varInit->getLocEnd());
} else {
spvBuilder.createStore(var, spvBuilder.getConstantNull(varType), loc);
}
spvBuilder.createStore(initDoneVar, spvBuilder.getConstantBool(true), loc);
spvBuilder.createBranch(doneBB, loc);
spvBuilder.addSuccessor(doneBB);
spvBuilder.setInsertPoint(doneBB);
}
bool SpirvEmitter::isVectorShuffle(const Expr *expr) {
// TODO: the following check is essentially duplicated from
// doHLSLVectorElementExpr. Should unify them.
if (const auto *vecElemExpr = dyn_cast<HLSLVectorElementExpr>(expr)) {
const Expr *base = nullptr;
hlsl::VectorMemberAccessPositions accessor;
condenseVectorElementExpr(vecElemExpr, &base, &accessor);
const auto accessorSize = accessor.Count;
if (accessorSize == 1) {
      // Selecting only one element. OpAccessChain or OpCompositeExtract is
      // used for such cases.
return false;
}
const auto baseSize = hlsl::GetHLSLVecSize(base->getType());
if (accessorSize != baseSize)
return true;
for (uint32_t i = 0; i < accessorSize; ++i) {
uint32_t position;
accessor.GetPosition(i, &position);
if (position != i)
return true;
}
// Selecting exactly the original vector. No vector shuffle generated.
return false;
}
return false;
}
bool SpirvEmitter::isShortCircuitedOp(const Expr *expr) {
if (!expr || astContext.getLangOpts().HLSLVersion < hlsl::LangStd::v2021) {
return false;
}
const auto *binOp = dyn_cast<BinaryOperator>(expr->IgnoreParens());
if (binOp) {
return binOp->getOpcode() == BO_LAnd || binOp->getOpcode() == BO_LOr;
}
const auto *condOp = dyn_cast<ConditionalOperator>(expr->IgnoreParens());
return condOp;
}
bool SpirvEmitter::stmtTreeContainsShortCircuitedOp(const Stmt *stmt) {
if (!stmt) {
return false;
}
if (isShortCircuitedOp(dyn_cast<Expr>(stmt))) {
return true;
}
for (const auto *child : stmt->children()) {
if (stmtTreeContainsShortCircuitedOp(child)) {
return true;
}
}
return false;
}
bool SpirvEmitter::isTextureMipsSampleIndexing(const CXXOperatorCallExpr *expr,
const Expr **base,
const Expr **location,
const Expr **lod) {
if (!expr)
return false;
// <object>.mips[][] consists of an outer operator[] and an inner operator[]
const CXXOperatorCallExpr *outerExpr = expr;
if (outerExpr->getOperator() != OverloadedOperatorKind::OO_Subscript)
return false;
const Expr *arg0 = outerExpr->getArg(0)->IgnoreParenNoopCasts(astContext);
const CXXOperatorCallExpr *innerExpr = dyn_cast<CXXOperatorCallExpr>(arg0);
// Must have an inner operator[]
if (!innerExpr ||
innerExpr->getOperator() != OverloadedOperatorKind::OO_Subscript) {
return false;
}
const Expr *innerArg0 =
innerExpr->getArg(0)->IgnoreParenNoopCasts(astContext);
const MemberExpr *memberExpr = dyn_cast<MemberExpr>(innerArg0);
if (!memberExpr)
return false;
// Must be accessing the member named "mips" or "sample"
const auto &memberName =
memberExpr->getMemberNameInfo().getName().getAsString();
if (memberName != "mips" && memberName != "sample")
return false;
const Expr *object = memberExpr->getBase();
const auto objectType = object->getType();
if (!isTexture(objectType))
return false;
if (base)
*base = object;
if (lod)
*lod = innerExpr->getArg(1);
if (location)
*location = outerExpr->getArg(1);
return true;
}
bool SpirvEmitter::isBufferTextureIndexing(const CXXOperatorCallExpr *indexExpr,
const Expr **base,
const Expr **index) {
if (!indexExpr)
return false;
// Must be operator[]
if (indexExpr->getOperator() != OverloadedOperatorKind::OO_Subscript)
return false;
const Expr *object = indexExpr->getArg(0);
const auto objectType = object->getType();
if (isBuffer(objectType) || isRWBuffer(objectType) || isTexture(objectType) ||
isRWTexture(objectType)) {
if (base)
*base = object;
if (index)
*index = indexExpr->getArg(1);
return true;
}
return false;
}
bool SpirvEmitter::isDescriptorHeap(const Expr *expr) {
const CXXOperatorCallExpr *operatorExpr = dyn_cast<CXXOperatorCallExpr>(expr);
if (!operatorExpr)
return false;
// Must be operator[]
if (operatorExpr->getOperator() != OverloadedOperatorKind::OO_Subscript)
return false;
const Expr *object = operatorExpr->getArg(0);
const auto objectType = object->getType();
return isResourceDescriptorHeap(objectType) ||
isSamplerDescriptorHeap(objectType);
}
void SpirvEmitter::getDescriptorHeapOperands(const Expr *expr,
const Expr **base,
const Expr **index) {
assert(base || index);
assert(isDescriptorHeap(expr));
const CXXOperatorCallExpr *operatorExpr = cast<CXXOperatorCallExpr>(expr);
if (base)
*base = operatorExpr->getArg(0);
if (index)
*index = operatorExpr->getArg(1);
}
void SpirvEmitter::condenseVectorElementExpr(
const HLSLVectorElementExpr *expr, const Expr **basePtr,
hlsl::VectorMemberAccessPositions *flattenedAccessor) {
llvm::SmallVector<hlsl::VectorMemberAccessPositions, 2> accessors;
*basePtr = expr;
  // Recursively descend until we find the true base vector (the base vector
  // that does not itself have a base vector), collecting accessors in
  // reverse order along the way.
// Example: for myVector.yxwz.yxz.xx.yx, the true base is 'myVector'.
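  // Flattening example: v.zwxy.xy selects positions {0, 1} of {2, 3, 0, 1},
  // which is the same as v.zw.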
while (const auto *vecElemBase = dyn_cast<HLSLVectorElementExpr>(*basePtr)) {
accessors.push_back(vecElemBase->getEncodedElementAccess());
*basePtr = vecElemBase->getBase();
// We need to skip any number of parentheses around swizzling at any level.
while (const auto *parenExpr = dyn_cast<ParenExpr>(*basePtr))
*basePtr = parenExpr->getSubExpr();
}
*flattenedAccessor = accessors.back();
for (int32_t i = accessors.size() - 2; i >= 0; --i) {
    const auto &currentAccessor = accessors[i];
// Apply the current level of accessor to the flattened accessor of all
// previous levels of ones.
hlsl::VectorMemberAccessPositions combinedAccessor;
for (uint32_t j = 0; j < currentAccessor.Count; ++j) {
uint32_t currentPosition = 0;
      currentAccessor.GetPosition(j, &currentPosition);
uint32_t previousPosition = 0;
flattenedAccessor->GetPosition(currentPosition, &previousPosition);
combinedAccessor.SetPosition(j, previousPosition);
}
combinedAccessor.Count = currentAccessor.Count;
combinedAccessor.IsValid =
flattenedAccessor->IsValid && currentAccessor.IsValid;
*flattenedAccessor = combinedAccessor;
}
}
SpirvInstruction *SpirvEmitter::createVectorSplat(const Expr *scalarExpr,
uint32_t size,
SourceRange rangeOverride) {
SpirvInstruction *scalarVal = nullptr;
SourceRange range = (rangeOverride != SourceRange())
? rangeOverride
: scalarExpr->getSourceRange();
// Try to evaluate the element as a constant first. If successful, we can
// generate constant instructions for this vector splat.
if ((scalarVal = constEvaluator.tryToEvaluateAsConst(scalarExpr,
isSpecConstantMode))) {
scalarVal->setRValue();
} else {
scalarVal = loadIfGLValue(scalarExpr, range);
}
if (!scalarVal || size == 1) {
// Just return the scalar value for a vector splat of size 1.
// Note that it can be used as an lvalue, so we need to carry over
// the lvalueness for non-constant cases.
return scalarVal;
}
const auto vecType = astContext.getExtVectorType(scalarExpr->getType(), size);
// TODO: we are saying the constant has Function storage class here.
// Should find a more meaningful one.
if (auto *constVal = dyn_cast<SpirvConstant>(scalarVal)) {
llvm::SmallVector<SpirvConstant *, 4> elements(size_t(size), constVal);
const bool isSpecConst = constVal->getopcode() == spv::Op::OpSpecConstant;
auto *value =
spvBuilder.getConstantComposite(vecType, elements, isSpecConst);
if (!value)
return nullptr;
value->setRValue();
return value;
} else {
llvm::SmallVector<SpirvInstruction *, 4> elements(size_t(size), scalarVal);
auto *value = spvBuilder.createCompositeConstruct(
vecType, elements, scalarExpr->getLocStart(), range);
if (!value)
return nullptr;
value->setRValue();
return value;
}
}
void SpirvEmitter::splitVecLastElement(QualType vecType, SpirvInstruction *vec,
SpirvInstruction **residual,
SpirvInstruction **lastElement,
SourceLocation loc) {
assert(hlsl::IsHLSLVecType(vecType));
const uint32_t count = hlsl::GetHLSLVecSize(vecType);
assert(count > 1);
const QualType elemType = hlsl::GetHLSLVecElementType(vecType);
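// E.g., for a float4 input, *residual receives the xyz part and
// *lastElement receives the w component.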
if (count == 2) {
*residual = spvBuilder.createCompositeExtract(elemType, vec, 0, loc);
} else {
llvm::SmallVector<uint32_t, 4> indices;
for (uint32_t i = 0; i < count - 1; ++i)
indices.push_back(i);
const QualType type = astContext.getExtVectorType(elemType, count - 1);
*residual = spvBuilder.createVectorShuffle(type, vec, vec, indices, loc);
}
*lastElement =
spvBuilder.createCompositeExtract(elemType, vec, {count - 1}, loc);
}
SpirvInstruction *SpirvEmitter::convertVectorToStruct(QualType astStructType,
QualType elemType,
SpirvInstruction *vector,
SourceLocation loc,
SourceRange range) {
assert(astStructType->isStructureType());
LowerTypeVisitor lowerTypeVisitor(astContext, spvContext, spirvOptions,
spvBuilder);
const StructType *spirvStructType =
lowerStructType(spirvOptions, lowerTypeVisitor, astStructType);
uint32_t vectorIndex = 0;
uint32_t elemCount = 1;
llvm::SmallVector<SpirvInstruction *, 4> members;
forEachSpirvField(astStructType->getAs<RecordType>(), spirvStructType,
[&](size_t spirvFieldIndex, const QualType &fieldType,
const auto &field) {
if (isScalarType(fieldType)) {
members.push_back(spvBuilder.createCompositeExtract(
elemType, vector, {vectorIndex++}, loc, range));
return true;
}
if (isVectorType(fieldType, nullptr, &elemCount)) {
llvm::SmallVector<uint32_t, 4> indices;
for (uint32_t i = 0; i < elemCount; ++i)
indices.push_back(vectorIndex++);
members.push_back(spvBuilder.createVectorShuffle(
astContext.getExtVectorType(elemType, elemCount),
vector, vector, indices, loc, range));
return true;
}
assert(false && "unhandled type");
return false;
});
return spvBuilder.createCompositeConstruct(
astStructType, members, vector->getSourceLocation(), range);
}
SpirvInstruction *
SpirvEmitter::tryToGenFloatVectorScale(const BinaryOperator *expr) {
const QualType type = expr->getType();
const SourceRange range = expr->getSourceRange();
QualType elemType = {};
// We can only translate floatN * float into OpVectorTimesScalar.
// So the result type must be floatN. Note that float1 is not a valid vector
// in SPIR-V.
if (!(isVectorType(type, &elemType) && elemType->isFloatingType()))
return nullptr;
const Expr *lhs = expr->getLHS();
const Expr *rhs = expr->getRHS();
// Multiplying a float vector with a float scalar will be represented in
// the AST via a binary operation with two float vectors as operands; one of
// the operands comes from an implicit cast with kind CK_HLSLVectorSplat.
// vector * scalar
if (hlsl::IsHLSLVecType(lhs->getType())) {
if (const auto *cast = dyn_cast<ImplicitCastExpr>(rhs)) {
if (cast->getCastKind() == CK_HLSLVectorSplat) {
const QualType vecType = expr->getType();
if (const auto *compoundAssignExpr =
dyn_cast<CompoundAssignOperator>(expr)) {
const auto computationType =
compoundAssignExpr->getComputationLHSType();
SpirvInstruction *lhsPtr = nullptr;
auto *result = processBinaryOp(lhs, cast->getSubExpr(),
expr->getOpcode(), computationType,
vecType, range, expr->getOperatorLoc(),
&lhsPtr, spv::Op::OpVectorTimesScalar);
return processAssignment(lhs, result, true, lhsPtr, range);
} else {
return processBinaryOp(lhs, cast->getSubExpr(), expr->getOpcode(),
vecType, vecType, range,
expr->getOperatorLoc(), nullptr,
spv::Op::OpVectorTimesScalar);
}
}
}
}
// scalar * vector
if (hlsl::IsHLSLVecType(rhs->getType())) {
if (const auto *cast = dyn_cast<ImplicitCastExpr>(lhs)) {
if (cast->getCastKind() == CK_HLSLVectorSplat) {
const QualType vecType = expr->getType();
// We need to switch the positions of lhs and rhs here because
// OpVectorTimesScalar requires the first operand to be a vector and
// the second to be a scalar.
return processBinaryOp(rhs, cast->getSubExpr(), expr->getOpcode(),
vecType, vecType, range, expr->getOperatorLoc(),
nullptr, spv::Op::OpVectorTimesScalar);
}
}
}
return nullptr;
}
SpirvInstruction *
SpirvEmitter::tryToGenFloatMatrixScale(const BinaryOperator *expr) {
const QualType type = expr->getType();
const SourceRange range = expr->getSourceRange();
// We translate 'floatMxN * float' into OpMatrixTimesScalar.
// We translate 'floatMx1 * float' and 'float1xN * float' using
// OpVectorTimesScalar.
// So the result type can be floatMxN, floatMx1, or float1xN.
if (!hlsl::IsHLSLMatType(type) ||
!hlsl::GetHLSLMatElementType(type)->isFloatingType() || is1x1Matrix(type))
return nullptr;
const Expr *lhs = expr->getLHS();
const Expr *rhs = expr->getRHS();
const QualType lhsType = lhs->getType();
const QualType rhsType = rhs->getType();
const auto selectOpcode = [](const QualType ty) {
return isMx1Matrix(ty) || is1xNMatrix(ty) ? spv::Op::OpVectorTimesScalar
: spv::Op::OpMatrixTimesScalar;
};
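// Mx1 and 1xN float matrices are lowered to SPIR-V vectors, hence
// OpVectorTimesScalar for them instead of OpMatrixTimesScalar.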
// Multiplying a float matrix with a float scalar will be represented in
// the AST via a binary operation with two float matrices as operands; one of
// the operands comes from an implicit cast with kind CK_HLSLMatrixSplat.
// matrix * scalar
if (hlsl::IsHLSLMatType(lhsType)) {
if (const auto *cast = dyn_cast<ImplicitCastExpr>(rhs)) {
if (cast->getCastKind() == CK_HLSLMatrixSplat) {
const QualType matType = expr->getType();
const spv::Op opcode = selectOpcode(lhsType);
if (const auto *compoundAssignExpr =
dyn_cast<CompoundAssignOperator>(expr)) {
const auto computationType =
compoundAssignExpr->getComputationLHSType();
SpirvInstruction *lhsPtr = nullptr;
auto *result = processBinaryOp(
lhs, cast->getSubExpr(), expr->getOpcode(), computationType,
matType, range, expr->getOperatorLoc(), &lhsPtr, opcode);
return processAssignment(lhs, result, true, lhsPtr);
} else {
return processBinaryOp(lhs, cast->getSubExpr(), expr->getOpcode(),
matType, matType, range,
expr->getOperatorLoc(), nullptr, opcode);
}
}
}
}
// scalar * matrix
if (hlsl::IsHLSLMatType(rhsType)) {
if (const auto *cast = dyn_cast<ImplicitCastExpr>(lhs)) {
if (cast->getCastKind() == CK_HLSLMatrixSplat) {
const QualType matType = expr->getType();
const spv::Op opcode = selectOpcode(rhsType);
// We need to switch the positions of lhs and rhs here because
// OpMatrixTimesScalar requires the first operand to be a matrix and
// the second to be a scalar.
return processBinaryOp(rhs, cast->getSubExpr(), expr->getOpcode(),
matType, matType, range, expr->getOperatorLoc(),
nullptr, opcode);
}
}
}
return nullptr;
}
SpirvInstruction *SpirvEmitter::tryToAssignToVectorElements(
const Expr *lhs, SpirvInstruction *rhs, SourceRange range) {
// Assigning to a vector swizzling lhs is tricky if we are neither
// writing to one element nor all elements in their original order.
// Under such cases, we need to create a new vector swizzling involving
// both the lhs and rhs vectors and then write the result of this swizzling
// into the base vector of lhs.
// For example, for vec4.yz = vec2, we need to do the following:
//
// %vec4Val = OpLoad %v4float %vec4
// %vec2Val = OpLoad %v2float %vec2
// %shuffle = OpVectorShuffle %v4float %vec4Val %vec2Val 0 4 5 3
// OpStore %vec4 %shuffle
//
// When doing the vector shuffle, we use the lhs base vector as the first
// vector and the rhs vector as the second vector. Therefore, all elements
// in the second vector will be selected into the shuffle result.
const auto *lhsExpr = dyn_cast<HLSLVectorElementExpr>(lhs);
if (!lhsExpr)
return nullptr;
// Special case for <scalar-value>.x, which will have an AST of
// HLSLVectorElementExpr whose base is an ImplicitCastExpr
// (CK_HLSLVectorSplat). We just need to assign to <scalar-value>
// for such case.
if (const auto *baseCast = dyn_cast<CastExpr>(lhsExpr->getBase()))
if (baseCast->getCastKind() == CastKind::CK_HLSLVectorSplat &&
hlsl::GetHLSLVecSize(baseCast->getType()) == 1)
return processAssignment(baseCast->getSubExpr(), rhs, false, nullptr,
range);
const Expr *base = nullptr;
hlsl::VectorMemberAccessPositions accessor;
condenseVectorElementExpr(lhsExpr, &base, &accessor);
const QualType baseType = base->getType();
assert(hlsl::IsHLSLVecType(baseType));
const auto baseSize = hlsl::GetHLSLVecSize(baseType);
const auto accessorSize = accessor.Count;
// Whether selecting the whole original vector
bool isSelectOrigin = accessorSize == baseSize;
// Assigning to one component
if (accessorSize == 1) {
if (isBufferTextureIndexing(dyn_cast_or_null<CXXOperatorCallExpr>(base))) {
// Assigning to one component of a RWBuffer/RWTexture element
// We need to use OpImageWrite here.
// Compose the new vector value first
auto *oldVec = doExpr(base, range);
auto *newVec = spvBuilder.createCompositeInsert(
baseType, oldVec, {accessor.Swz0}, rhs, lhs->getLocStart(), range);
auto *result = tryToAssignToRWBufferRWTexture(base, newVec, range);
assert(result); // Definitely RWBuffer/RWTexture assignment
(void)result;
return rhs; // TODO: incorrect for compound assignments
} else {
// Assigning to one component of a mesh out attribute/indices vector
// object.
SpirvInstruction *vecComponent = spvBuilder.getConstantInt(
astContext.UnsignedIntTy, llvm::APInt(32, accessor.Swz0));
if (tryToAssignToMSOutAttrsOrIndices(base, rhs, vecComponent))
return rhs;
// Assigning to one normal vector component. Nothing special, just fall
// back to the normal CodeGen path.
return nullptr;
}
}
if (isSelectOrigin) {
for (uint32_t i = 0; i < accessorSize; ++i) {
uint32_t position;
accessor.GetPosition(i, &position);
if (position != i)
isSelectOrigin = false;
}
}
// Assigning to the original vector
if (isSelectOrigin) {
// Ignore this HLSLVectorElementExpr and dispatch to base
return processAssignment(base, rhs, false, nullptr, range);
}
if (tryToAssignToMSOutAttrsOrIndices(base, rhs, /*vecComponent=*/nullptr,
/*noWriteBack=*/true)) {
// Assigning to 'n' components of a mesh out attribute/indices vector
// object.
const QualType elemType =
hlsl::GetHLSLVecElementType(rhs->getAstResultType());
uint32_t i = 0;
for (; i < accessor.Count; ++i) {
auto *rhsElem = spvBuilder.createCompositeExtract(elemType, rhs, {i},
lhs->getLocStart());
uint32_t position;
accessor.GetPosition(i, &position);
SpirvInstruction *vecComponent = spvBuilder.getConstantInt(
astContext.UnsignedIntTy, llvm::APInt(32, position));
if (!tryToAssignToMSOutAttrsOrIndices(base, rhsElem, vecComponent))
break;
}
assert(i == accessor.Count);
return rhs;
}
llvm::SmallVector<uint32_t, 4> selectors;
selectors.resize(baseSize);
// Assume we are selecting all original elements first.
for (uint32_t i = 0; i < baseSize; ++i) {
selectors[i] = i;
}
// Now fix up the elements that actually got overwritten by the rhs vector.
// Since we are using the rhs vector as the second vector, its element
// indices must be offset by the size of the lhs base vector.
for (uint32_t i = 0; i < accessor.Count; ++i) {
uint32_t position;
accessor.GetPosition(i, &position);
selectors[position] = baseSize + i;
}
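// E.g., for the vec4.yz = vec2 example above, selectors is now
// {0, 4, 5, 3}, matching the OpVectorShuffle operands shown earlier.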
auto *vec1 = doExpr(base, range);
auto *vec1Val =
vec1->isRValue()
? vec1
: spvBuilder.createLoad(baseType, vec1, base->getLocStart(), range);
auto *shuffle = spvBuilder.createVectorShuffle(
baseType, vec1Val, rhs, selectors, lhs->getLocStart(), range);
if (!tryToAssignToRWBufferRWTexture(base, shuffle))
spvBuilder.createStore(vec1, shuffle, lhs->getLocStart(), range);
// TODO: OK, this return value is incorrect for compound assignments, for
// which cases we should return lvalues. Should at least emit errors if
// this return value is used (can be checked via ASTContext.getParents).
return rhs;
}
SpirvInstruction *SpirvEmitter::tryToAssignToRWBufferRWTexture(
const Expr *lhs, SpirvInstruction *rhs, SourceRange range) {
const Expr *baseExpr = nullptr;
const Expr *indexExpr = nullptr;
const auto lhsExpr = dyn_cast<CXXOperatorCallExpr>(lhs);
if (isBufferTextureIndexing(lhsExpr, &baseExpr, &indexExpr)) {
auto *loc = doExpr(indexExpr, range);
const QualType imageType = baseExpr->getType();
auto *baseInfo = doExpr(baseExpr, range);
const bool rasterizerOrder = isRasterizerOrderedView(imageType);
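// Writes through a rasterizer-ordered view must be wrapped in a fragment
// shader interlock region to get the ordering guarantees ROVs provide.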
if (rasterizerOrder) {
beginInvocationInterlock(baseExpr->getExprLoc(), range);
}
auto *image = spvBuilder.createLoad(imageType, baseInfo,
baseExpr->getExprLoc(), range);
spvBuilder.createImageWrite(imageType, image, loc, rhs, lhs->getExprLoc(),
range);
if (rasterizerOrder) {
spvBuilder.createEndInvocationInterlockEXT(baseExpr->getExprLoc(), range);
}
return rhs;
}
return nullptr;
}
SpirvInstruction *SpirvEmitter::tryToAssignToMatrixElements(
const Expr *lhs, SpirvInstruction *rhs, SourceRange range) {
const auto *lhsExpr = dyn_cast<ExtMatrixElementExpr>(lhs);
if (!lhsExpr)
return nullptr;
const Expr *baseMat = lhsExpr->getBase();
auto *base = doExpr(baseMat, range);
const QualType elemType = hlsl::GetHLSLMatElementType(baseMat->getType());
uint32_t rowCount = 0, colCount = 0;
hlsl::GetHLSLMatRowColCount(baseMat->getType(), rowCount, colCount);
// For each lhs element written to:
// 1. Extract the corresponding rhs element using OpCompositeExtract
// 2. Create access chain for the lhs element using OpAccessChain
// 3. Write using OpStore
const auto accessor = lhsExpr->getEncodedElementAccess();
for (uint32_t i = 0; i < accessor.Count; ++i) {
uint32_t row = 0, col = 0;
accessor.GetPosition(i, &row, &col);
llvm::SmallVector<uint32_t, 2> indices;
// If the matrix only has one row/column, we are actually indexing into a
// vector; only one index is needed in that case.
if (rowCount > 1)
indices.push_back(row);
if (colCount > 1)
indices.push_back(col);
llvm::SmallVector<SpirvInstruction *, 2> indexInstructions(indices.size(),
nullptr);
for (uint32_t i = 0; i < indices.size(); ++i)
indexInstructions[i] = spvBuilder.getConstantInt(
astContext.IntTy, llvm::APInt(32, indices[i], true));
// If we are writing to only one element, the rhs should already be a
// scalar value.
auto *rhsElem = rhs;
if (accessor.Count > 1) {
rhsElem = spvBuilder.createCompositeExtract(
elemType, rhs, {i}, rhs->getSourceLocation(), range);
}
// If the lhs is actually a 1x1 matrix, we don't need the access chain;
// base is already the destination pointer.
auto *lhsElemPtr = base;
if (!indexInstructions.empty()) {
assert(!base->isRValue());
// Load the element via access chain
lhsElemPtr = spvBuilder.createAccessChain(
elemType, lhsElemPtr, indexInstructions, lhs->getLocStart(), range);
}
spvBuilder.createStore(lhsElemPtr, rhsElem, lhs->getLocStart(), range);
}
// TODO: OK, this return value is incorrect for compound assignments, for
// which cases we should return lvalues. Should at least emit errors if
// this return value is used (can be checked via ASTContext.getParents).
return rhs;
}
SpirvInstruction *SpirvEmitter::tryToAssignToMSOutAttrsOrIndices(
const Expr *lhs, SpirvInstruction *rhs, SpirvInstruction *vecComponent,
bool noWriteBack) {
// Early exit for non-mesh shaders.
if (!spvContext.isMS())
return nullptr;
llvm::SmallVector<SpirvInstruction *, 4> indices;
bool isMSOutAttribute = false;
bool isMSOutAttributeBlock = false;
bool isMSOutIndices = false;
const Expr *base = collectArrayStructIndices(lhs, /*rawIndex*/ false,
/*rawIndices*/ nullptr, &indices,
&isMSOutAttribute);
// Expecting at least one array index - early exit.
if (!base || indices.empty())
return nullptr;
const DeclaratorDecl *varDecl = nullptr;
if (isMSOutAttribute) {
const MemberExpr *memberExpr = dyn_cast<MemberExpr>(base);
assert(memberExpr);
varDecl = cast<DeclaratorDecl>(memberExpr->getMemberDecl());
} else {
if (const auto *arg = dyn_cast<DeclRefExpr>(base)) {
if ((varDecl = dyn_cast<DeclaratorDecl>(arg->getDecl()))) {
if (varDecl->hasAttr<HLSLIndicesAttr>()) {
isMSOutIndices = true;
} else if (varDecl->hasAttr<HLSLVerticesAttr>() ||
varDecl->hasAttr<HLSLPrimitivesAttr>()) {
isMSOutAttributeBlock = true;
}
}
}
}
// Return if no out attribute or indices object found.
if (!(isMSOutAttribute || isMSOutAttributeBlock || isMSOutIndices)) {
return nullptr;
}
// For noWriteBack, return without generating write instructions.
if (noWriteBack) {
return rhs;
}
// Add vecComponent to indices.
if (vecComponent) {
indices.push_back(vecComponent);
}
if (isMSOutAttribute) {
assignToMSOutAttribute(varDecl, rhs, indices);
} else if (isMSOutIndices) {
assignToMSOutIndices(varDecl, rhs, indices);
} else {
assert(isMSOutAttributeBlock);
QualType type = varDecl->getType();
assert(isa<ConstantArrayType>(type));
type = astContext.getAsConstantArrayType(type)->getElementType();
assert(type->isStructureType());
// Extract subvalue and assign to its corresponding member attribute.
const auto *structDecl = type->getAs<RecordType>()->getDecl();
for (const auto *field : structDecl->fields()) {
const auto fieldType = field->getType();
SpirvInstruction *subValue = spvBuilder.createCompositeExtract(
fieldType, rhs, {getNumBaseClasses(type) + field->getFieldIndex()},
lhs->getLocStart());
assignToMSOutAttribute(field, subValue, indices);
}
}
// TODO: OK, this return value is incorrect for compound assignments, for
// which cases we should return lvalues. Should at least emit errors if
// this return value is used (can be checked via ASTContext.getParents).
return rhs;
}
void SpirvEmitter::assignToMSOutAttribute(
const DeclaratorDecl *decl, SpirvInstruction *value,
const llvm::SmallVector<SpirvInstruction *, 4> &indices) {
assert(spvContext.isMS() && !indices.empty());
// Extract attribute index and vecComponent (if any).
SpirvInstruction *attrIndex = indices.front();
SpirvInstruction *vecComponent = nullptr;
if (indices.size() > 1) {
vecComponent = indices.back();
}
auto semanticInfo = declIdMapper.getStageVarSemantic(decl);
assert(semanticInfo.isValid());
const auto loc = decl->getLocation();
// Special-case writes to the clip/cull distance attributes.
if (declIdMapper.glPerVertex.tryToAccess(
hlsl::DXIL::SigPointKind::MSOut, semanticInfo.semantic->GetKind(),
semanticInfo.index, attrIndex, &value, /*noWriteBack=*/false,
vecComponent, loc)) {
return;
}
// All other attribute writes are handled below.
auto *varInstr = declIdMapper.getStageVarInstruction(decl);
QualType valueType = value->getAstResultType();
if (valueType->isBooleanType()) {
// Externally visible variables are changed to uint, so we need to cast the
// value to uint.
value = castToInt(value, valueType, astContext.UnsignedIntTy, loc);
valueType = astContext.UnsignedIntTy;
}
varInstr = spvBuilder.createAccessChain(valueType, varInstr, indices, loc);
if (semanticInfo.semantic->GetKind() == hlsl::Semantic::Kind::Position)
value = invertYIfRequested(value, semanticInfo.loc);
spvBuilder.createStore(varInstr, value, loc);
}
void SpirvEmitter::assignToMSOutIndices(
const DeclaratorDecl *decl, SpirvInstruction *value,
const llvm::SmallVector<SpirvInstruction *, 4> &indices) {
assert(spvContext.isMS() && !indices.empty());
bool extMesh = featureManager.isExtensionEnabled(Extension::EXT_mesh_shader);
// Extract vertex index and vecComponent (if any).
SpirvInstruction *vertIndex = indices.front();
SpirvInstruction *vecComponent = nullptr;
if (indices.size() > 1) {
vecComponent = indices.back();
}
auto *var = declIdMapper.getStageVarInstruction(decl);
const auto *varTypeDecl = astContext.getAsConstantArrayType(decl->getType());
QualType varType = varTypeDecl->getElementType();
uint32_t numVertices = 1;
if (!isVectorType(varType, nullptr, &numVertices)) {
assert(isScalarType(varType));
}
QualType valueType = value->getAstResultType();
uint32_t numValues = 1;
if (!isVectorType(valueType, nullptr, &numValues)) {
assert(isScalarType(valueType));
}
const auto loc = decl->getLocation();
if (numVertices == 1) {
// for "point" output topology.
assert(numValues == 1);
// create accesschain for PrimitiveIndicesNV[vertIndex].
auto *ptr = spvBuilder.createAccessChain(astContext.UnsignedIntTy, var,
{vertIndex}, loc);
// finally create store for PrimitiveIndicesNV[vertIndex] = value.
spvBuilder.createStore(ptr, value, loc);
} else {
// for "line" or "triangle" output topology.
assert(numVertices == 2 || numVertices == 3);
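// Layout note: the EXT mesh shader indices variable is an array of
// uint2/uint3 vectors that we can index directly, while PrimitiveIndicesNV
// is a flat uint array, so the NV paths below linearize the offsets
// manually.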
if (vecComponent) {
// write an individual vector component of uint2 or uint3.
assert(numValues == 1);
if (extMesh) {
// create accesschain for Primitive*IndicesEXT[vertIndex][vecComponent].
auto *ptr = spvBuilder.createAccessChain(
astContext.UnsignedIntTy, var, {vertIndex, vecComponent}, loc);
// finally create store for
// Primitive*IndicesEXT[vertIndex][vecComponent] = value.
spvBuilder.createStore(ptr, value, loc);
} else {
// set baseOffset = vertIndex * numVertices.
auto *baseOffset = spvBuilder.createBinaryOp(
spv::Op::OpIMul, astContext.UnsignedIntTy, vertIndex,
spvBuilder.getConstantInt(astContext.UnsignedIntTy,
llvm::APInt(32, numVertices)),
loc);
// set baseOffset = baseOffset + vecComponent.
baseOffset =
spvBuilder.createBinaryOp(spv::Op::OpIAdd, astContext.UnsignedIntTy,
baseOffset, vecComponent, loc);
// create accesschain for PrimitiveIndicesNV[baseOffset].
auto *ptr = spvBuilder.createAccessChain(astContext.UnsignedIntTy, var,
{baseOffset}, loc);
// finally create store for PrimitiveIndicesNV[baseOffset] = value.
spvBuilder.createStore(ptr, value, loc);
}
} else {
assert(numValues == numVertices);
if (extMesh) {
// create accesschain for Primitive*IndicesEXT[vertIndex].
auto *ptr = spvBuilder.createAccessChain(varType, var, vertIndex, loc);
// finally create store for Primitive*IndicesEXT[vertIndex] = value.
spvBuilder.createStore(ptr, value, loc);
} else {
// set baseOffset = vertIndex * numVertices.
auto *baseOffset = spvBuilder.createBinaryOp(
spv::Op::OpIMul, astContext.UnsignedIntTy, vertIndex,
spvBuilder.getConstantInt(astContext.UnsignedIntTy,
llvm::APInt(32, numVertices)),
loc);
// write all vector components of uint2 or uint3.
auto *curOffset = baseOffset;
for (uint32_t i = 0; i < numValues; ++i) {
if (i != 0) {
// set curOffset = baseOffset + i.
curOffset = spvBuilder.createBinaryOp(
spv::Op::OpIAdd, astContext.UnsignedIntTy, baseOffset,
spvBuilder.getConstantInt(astContext.UnsignedIntTy,
llvm::APInt(32, i)),
loc);
}
// create accesschain for PrimitiveIndicesNV[curOffset].
auto *ptr = spvBuilder.createAccessChain(astContext.UnsignedIntTy,
var, {curOffset}, loc);
// finally create store for PrimitiveIndicesNV[curOffset] = value[i].
spvBuilder.createStore(ptr,
spvBuilder.createCompositeExtract(
astContext.UnsignedIntTy, value, {i}, loc),
loc);
}
}
}
}
}
SpirvInstruction *SpirvEmitter::processEachVectorInMatrix(
const Expr *matrix, SpirvInstruction *matrixVal,
llvm::function_ref<SpirvInstruction *(uint32_t, QualType, QualType,
SpirvInstruction *)>
actOnEachVector,
SourceLocation loc, SourceRange range) {
return processEachVectorInMatrix(matrix, matrix->getType(), matrixVal,
actOnEachVector, loc, range);
}
SpirvInstruction *SpirvEmitter::processEachVectorInMatrix(
const Expr *matrix, QualType outputType, SpirvInstruction *matrixVal,
llvm::function_ref<SpirvInstruction *(uint32_t, QualType, QualType,
SpirvInstruction *)>
actOnEachVector,
SourceLocation loc, SourceRange range) {
const auto matType = matrix->getType();
assert(isMxNMatrix(matType) && isMxNMatrix(outputType));
const QualType inVecType = getComponentVectorType(astContext, matType);
const QualType outVecType = getComponentVectorType(astContext, outputType);
uint32_t rowCount = 0, colCount = 0;
hlsl::GetHLSLMatRowColCount(matType, rowCount, colCount);
llvm::SmallVector<SpirvInstruction *, 4> vectors;
// Extract each component vector and do operation on it
for (uint32_t i = 0; i < rowCount; ++i) {
auto *lhsVec = spvBuilder.createCompositeExtract(inVecType, matrixVal, {i},
matrix->getLocStart());
vectors.push_back(actOnEachVector(i, inVecType, outVecType, lhsVec));
}
// Construct the result matrix
auto *val =
spvBuilder.createCompositeConstruct(outputType, vectors, loc, range);
if (!val)
return nullptr;
val->setRValue();
return val;
}
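// Translates a variable annotated with vk::constant_id, e.g.
//   [[vk::constant_id(3)]] const int kValue = 42;
// directly into a SPIR-V spec constant decorated with the given SpecId.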
void SpirvEmitter::createSpecConstant(const VarDecl *varDecl) {
class SpecConstantEnvRAII {
public:
// Creates a new instance which sets mode to true on creation,
// and resets mode to false on destruction.
SpecConstantEnvRAII(bool *mode) : modeSlot(mode) { *modeSlot = true; }
~SpecConstantEnvRAII() { *modeSlot = false; }
private:
bool *modeSlot;
};
const QualType varType = varDecl->getType();
bool hasError = false;
if (!varDecl->isExternallyVisible()) {
emitError("specialization constant must be externally visible",
varDecl->getLocation());
hasError = true;
}
if (const auto *builtinType = varType->getAs<BuiltinType>()) {
switch (builtinType->getKind()) {
case BuiltinType::Bool:
case BuiltinType::Int:
case BuiltinType::UInt:
case BuiltinType::Float:
break;
default:
emitError("unsupported specialization constant type",
varDecl->getLocStart());
hasError = true;
}
}
const auto *init = varDecl->getInit();
if (!init) {
emitError("missing default value for specialization constant",
varDecl->getLocation());
hasError = true;
} else if (!isAcceptedSpecConstantInit(init, astContext)) {
emitError("unsupported specialization constant initializer",
init->getLocStart())
<< init->getSourceRange();
hasError = true;
}
if (hasError)
return;
SpecConstantEnvRAII specConstantEnvRAII(&isSpecConstantMode);
const auto specConstant =
constEvaluator.tryToEvaluateAsConst(init, isSpecConstantMode);
// We are not creating a variable to hold the spec constant; instead, we
// translate the varDecl directly into the spec constant here.
spvBuilder.decorateSpecId(
specConstant, varDecl->getAttr<VKConstantIdAttr>()->getSpecConstId(),
varDecl->getLocation());
specConstant->setDebugName(varDecl->getName());
declIdMapper.registerSpecConstant(varDecl, specConstant);
}
SpirvInstruction *
SpirvEmitter::processMatrixBinaryOp(const Expr *lhs, const Expr *rhs,
const BinaryOperatorKind opcode,
SourceRange range, SourceLocation loc) {
// TODO: some code is duplicated from processBinaryOp. Try to unify them.
const auto lhsType = lhs->getType();
assert(isMxNMatrix(lhsType));
const spv::Op spvOp = translateOp(opcode, lhsType);
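// SPIR-V arithmetic instructions do not accept matrix operands, so below we
// perform the operation on each component row vector separately.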
SpirvInstruction *rhsVal = nullptr, *lhsPtr = nullptr, *lhsVal = nullptr;
if (BinaryOperator::isCompoundAssignmentOp(opcode)) {
// Evaluate rhs before lhs
rhsVal = doExpr(rhs);
lhsPtr = doExpr(lhs);
lhsVal = spvBuilder.createLoad(lhsType, lhsPtr, lhs->getLocStart());
} else {
// Evaluate lhs before rhs
lhsVal = lhsPtr = doExpr(lhs);
rhsVal = doExpr(rhs);
}
switch (opcode) {
case BO_Add:
case BO_Sub:
case BO_Mul:
case BO_Div:
case BO_Rem:
case BO_AddAssign:
case BO_SubAssign:
case BO_MulAssign:
case BO_DivAssign:
case BO_RemAssign: {
const auto actOnEachVec = [this, spvOp, rhsVal, rhs, loc, range](
uint32_t index, QualType inType,
QualType outType, SpirvInstruction *lhsVec) {
// For each vector of lhs, we need to load the corresponding vector of
// rhs and do the operation on them.
auto *rhsVec = spvBuilder.createCompositeExtract(inType, rhsVal, {index},
rhs->getLocStart());
auto *val =
spvBuilder.createBinaryOp(spvOp, outType, lhsVec, rhsVec, loc, range);
if (val)
val->setRValue();
return val;
};
return processEachVectorInMatrix(lhs, lhsVal, actOnEachVec,
lhs->getLocStart(), range);
}
case BO_Assign:
llvm_unreachable("assignment should not be handled here");
default:
break;
}
emitError("binary operator '%0' over matrix type unimplemented",
lhs->getExprLoc())
<< BinaryOperator::getOpcodeStr(opcode) << range;
return nullptr;
}
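// Walks the given expression from the outermost indexing operation inwards,
// collecting the index at each MemberExpr / ArraySubscriptExpr / operator[]
// level, and returns the innermost base expression that the collected
// indices apply to.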
const Expr *SpirvEmitter::collectArrayStructIndices(
const Expr *expr, bool rawIndex,
llvm::SmallVectorImpl<uint32_t> *rawIndices,
llvm::SmallVectorImpl<SpirvInstruction *> *indices, bool *isMSOutAttribute,
bool *isNointerp) {
assert((rawIndex && rawIndices) || (!rawIndex && indices));
if (const auto *indexing = dyn_cast<MemberExpr>(expr)) {
// First check whether this is referring to a static member. If it is, we
// create a DeclRefExpr for it.
if (auto *varDecl = dyn_cast<VarDecl>(indexing->getMemberDecl()))
if (varDecl->isStaticDataMember())
return DeclRefExpr::Create(
astContext, NestedNameSpecifierLoc(), SourceLocation(), varDecl,
/*RefersToEnclosingVariableOrCapture=*/false, SourceLocation(),
varDecl->getType(), VK_LValue);
if (isNointerp)
*isNointerp = *isNointerp || isNoInterpMemberExpr(indexing);
const Expr *base = collectArrayStructIndices(
indexing->getBase(), rawIndex, rawIndices, indices, isMSOutAttribute);
if (isMSOutAttribute && base) {
if (const auto *arg = dyn_cast<DeclRefExpr>(base)) {
if (const auto *varDecl = dyn_cast<VarDecl>(arg->getDecl())) {
if (varDecl->hasAttr<HLSLVerticesAttr>() ||
varDecl->hasAttr<HLSLPrimitivesAttr>()) {
assert(spvContext.isMS());
*isMSOutAttribute = true;
return expr;
}
}
}
}
{
LowerTypeVisitor lowerTypeVisitor(astContext, spvContext, spirvOptions,
spvBuilder);
const auto &astStructType =
/* structType */ indexing->getBase()->getType();
const StructType *spirvStructType =
lowerStructType(spirvOptions, lowerTypeVisitor, astStructType);
assert(spirvStructType != nullptr);
const uint32_t fieldIndex =
getFieldIndexInStruct(spirvStructType, astStructType,
/* fieldDecl */
dyn_cast<FieldDecl>(indexing->getMemberDecl()));
if (rawIndex) {
rawIndices->push_back(fieldIndex);
} else {
indices->push_back(spvBuilder.getConstantInt(
astContext.IntTy, llvm::APInt(32, fieldIndex, true)));
}
}
return base;
}
if (const auto *indexing = dyn_cast<ArraySubscriptExpr>(expr)) {
if (rawIndex)
return nullptr; // TODO: handle constant array index
// The base of an ArraySubscriptExpr has a wrapping LValueToRValue implicit
// cast. We need to ignore it to avoid creating an unwanted OpLoad.
const Expr *thisBase = indexing->getBase()->IgnoreParenLValueCasts();
const Expr *base = collectArrayStructIndices(
thisBase, rawIndex, rawIndices, indices, isMSOutAttribute, isNointerp);
// The index into an array must be an integer number.
const auto *idxExpr = indexing->getIdx();
const auto idxExprType = idxExpr->getType();
if (idxExpr->isLValue()) {
// This case should not happen for well-formed HLSL: correct HLSL code
// never uses an lvalue as the index of an array subscript.
emitError("Index of ArraySubscriptExpr must be rvalue",
idxExpr->getExprLoc());
return nullptr;
}
// Since `doExpr(idxExpr)` can generate an LValue SPIR-V instruction for an
// RValue `idxExpr` (see
// https://github.com/microsoft/DirectXShaderCompiler/issues/3620),
// we have to use `loadIfGLValue(idxExpr)` instead of `doExpr(idxExpr)`.
SpirvInstruction *thisIndex = loadIfGLValue(idxExpr);
if (!idxExprType->isIntegerType() || idxExprType->isBooleanType()) {
thisIndex = castToInt(thisIndex, idxExprType, astContext.UnsignedIntTy,
idxExpr->getExprLoc());
}
indices->push_back(thisIndex);
return base;
}
if (const auto *indexing = dyn_cast<CXXOperatorCallExpr>(expr))
if (indexing->getOperator() == OverloadedOperatorKind::OO_Subscript) {
if (rawIndex)
return nullptr; // TODO: handle constant array index
// If this is indexing into resources, we need specific OpImage*
// instructions for accessing. Return directly to avoid further building
// up the access chain.
if (isBufferTextureIndexing(indexing))
return indexing;
const Expr *thisBase =
indexing->getArg(0)->IgnoreParenNoopCasts(astContext);
const auto thisBaseType = thisBase->getType();
// If the base type is user defined, return the call expr so that any
// user-defined overloads of operator[] are called.
if (hlsl::IsUserDefinedRecordType(thisBaseType))
return expr;
const Expr *base = collectArrayStructIndices(
thisBase, rawIndex, rawIndices, indices, isMSOutAttribute);
if (thisBaseType != base->getType() &&
isAKindOfStructuredOrByteBuffer(thisBaseType)) {
// The immediate base is a kind of structured or byte buffer. It should
// be an alias variable. Break the normal index collecting chain.
// Return the immediate base as the base so that we can apply other
// hacks for legalization over it.
//
// Note: legalization specific code
indices->clear();
base = thisBase;
}
// If the base is a StructureType, we need to push an additional index 0
// here. This is because we created an additional OpTypeRuntimeArray
// in the structure.
if (isStructuredBuffer(thisBaseType))
indices->push_back(
spvBuilder.getConstantInt(astContext.IntTy, llvm::APInt(32, 0)));
if ((hlsl::IsHLSLVecType(thisBaseType) &&
(hlsl::GetHLSLVecSize(thisBaseType) == 1)) ||
is1x1Matrix(thisBaseType) || is1xNMatrix(thisBaseType)) {
// If this is a size-1 vector, a 1x1 matrix, or a 1xN matrix, ignore
// the index.
} else {
indices->push_back(doExpr(indexing->getArg(1)));
}
return base;
}
{
const Expr *index = nullptr;
// TODO: the following duplicates the logic in doCXXMemberCallExpr.
if (const auto *object = isStructuredBufferLoad(expr, &index)) {
if (rawIndex)
return nullptr; // TODO: handle constant array index
// For object.Load(index), there should be no more indexing into the
// object.
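// The leading constant 0 steps over the extra OpTypeRuntimeArray member
// wrapping the buffer contents in the SPIR-V struct type.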
indices->push_back(
spvBuilder.getConstantInt(astContext.IntTy, llvm::APInt(32, 0)));
indices->push_back(doExpr(index));
return object;
}
}
{
// Indexing into ConstantBuffers and TextureBuffers involves an additional
// FlatConversion node which casts the handle to the underlying structure
// type. We can look past the FlatConversion to continue to collect indices.
// For example: MyConstantBufferArray[0].structMember1
// `-MemberExpr .structMember1
// `-ImplicitCastExpr 'const T' lvalue <FlatConversion>
// `-ArraySubscriptExpr 'ConstantBuffer<T>':'ConstantBuffer<T>' lvalue
if (auto *castExpr = dyn_cast<ImplicitCastExpr>(expr)) {
if (castExpr->getCastKind() == CK_FlatConversion) {
const auto *subExpr = castExpr->getSubExpr();
const QualType subExprType = subExpr->getType();
if (isConstantTextureBuffer(subExprType)) {
return collectArrayStructIndices(subExpr, rawIndex, rawIndices,
indices, isMSOutAttribute);
}
}
}
}
// This is the deepest we can go. No more array or struct indexing.
return expr;
}
SpirvVariable *SpirvEmitter::turnIntoLValue(QualType type,
SpirvInstruction *source,
SourceLocation loc) {
assert(source->isRValue());
const auto varName = getAstTypeName(type);
const auto var = createTemporaryVar(type, varName, source, loc);
var->setLayoutRule(SpirvLayoutRule::Void);
var->setStorageClass(spv::StorageClass::Function);
var->setContainsAliasComponent(source->containsAliasComponent());
return var;
}
SpirvInstruction *SpirvEmitter::derefOrCreatePointerToValue(
QualType baseType, SpirvInstruction *base, QualType elemType,
const llvm::SmallVector<SpirvInstruction *, 4> &indices, SourceLocation loc,
SourceRange range) {
SpirvInstruction *value = nullptr;
if (base->isLValue()) {
value = spvBuilder.createAccessChain(elemType, base, indices, loc, range);
} else {
// If this is an rvalue, we need a temporary object to hold it
// so that we can get access chain from it.
SpirvVariable *variable = turnIntoLValue(baseType, base, loc);
SpirvInstruction *chain =
spvBuilder.createAccessChain(elemType, variable, indices, loc, range);
// Okay, this part seems weird, but it is intended:
// If the base is originally an rvalue, the whole AST involving the base
// is consistently set up to handle rvalues. By copying the base into a
// temporary variable and grabbing an access chain from it, we are breaking
// that consistency by turning the base from an rvalue into an lvalue. Keep
// in mind that there will be no LValueToRValue casts in the AST for us to
// rely on to load the access chain if an rvalue is expected. Therefore, we
// must do the load here. Otherwise, it would be up to the consumer of this
// access chain to do the load, and that consumer could be anywhere.
value = spvBuilder.createLoad(elemType, chain, loc);
}
value->setRasterizerOrdered(isRasterizerOrderedView(baseType));
return value;
}
SpirvInstruction *SpirvEmitter::castToBool(SpirvInstruction *fromVal,
QualType fromType,
QualType toBoolType,
SourceLocation loc,
SourceRange range) {
if (isSameType(astContext, fromType, toBoolType))
return fromVal;
{ // Special case handling for converting to a matrix of booleans.
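// SPIR-V comparison instructions only accept scalar and vector operands,
// so we compare the matrix row by row and rebuild the composite.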
QualType elemType = {};
uint32_t rowCount = 0, colCount = 0;
if (isMxNMatrix(fromType, &elemType, &rowCount, &colCount)) {
const auto fromRowQualType =
astContext.getExtVectorType(elemType, colCount);
const auto toBoolRowQualType =
astContext.getExtVectorType(astContext.BoolTy, colCount);
llvm::SmallVector<SpirvInstruction *, 4> rows;
for (uint32_t i = 0; i < rowCount; ++i) {
auto *row = spvBuilder.createCompositeExtract(fromRowQualType, fromVal,
{i}, loc, range);
rows.push_back(
castToBool(row, fromRowQualType, toBoolRowQualType, loc, range));
}
return spvBuilder.createCompositeConstruct(toBoolType, rows, loc, range);
}
}
// Converting to bool means comparing with value zero.
const spv::Op spvOp = translateOp(BO_NE, fromType);
auto *zeroVal = getValueZero(fromType);
return spvBuilder.createBinaryOp(spvOp, toBoolType, fromVal, zeroVal, loc);
}
SpirvInstruction *SpirvEmitter::castToInt(SpirvInstruction *fromVal,
QualType fromType, QualType toIntType,
SourceLocation srcLoc,
SourceRange srcRange) {
if (isEnumType(fromType))
fromType = astContext.IntTy;
if (isSameType(astContext, fromType, toIntType))
return fromVal;
if (isBoolOrVecOfBoolType(fromType)) {
auto *one = getValueOne(toIntType);
auto *zero = getValueZero(toIntType);
return spvBuilder.createSelect(toIntType, fromVal, one, zero, srcLoc,
srcRange);
}
if (fromType->isSpecificBuiltinType(BuiltinType::LitInt)) {
return spvBuilder.createUnaryOp(spv::Op::OpBitcast, toIntType, fromVal,
srcLoc, srcRange);
}
if (isSintOrVecOfSintType(fromType) || isUintOrVecOfUintType(fromType)) {
// First convert the source to the bitwidth of the destination if necessary.
QualType convertedType = {};
fromVal = convertBitwidth(fromVal, srcLoc, fromType, toIntType,
&convertedType, srcRange);
// If bitwidth conversion was the only thing we needed to do, we're done.
if (isSameScalarOrVecType(convertedType, toIntType))
return fromVal;
return spvBuilder.createUnaryOp(spv::Op::OpBitcast, toIntType, fromVal,
srcLoc, srcRange);
}
if (isFloatOrVecOfFloatType(fromType)) {
if (isSintOrVecOfSintType(toIntType)) {
return spvBuilder.createUnaryOp(spv::Op::OpConvertFToS, toIntType,
fromVal, srcLoc, srcRange);
}
if (isUintOrVecOfUintType(toIntType)) {
return spvBuilder.createUnaryOp(spv::Op::OpConvertFToU, toIntType,
fromVal, srcLoc, srcRange);
}
}
{
QualType elemType = {};
uint32_t numRows = 0, numCols = 0;
if (isMxNMatrix(fromType, &elemType, &numRows, &numCols)) {
// The source matrix and the target matrix must have the same dimensions.
QualType toElemType = {};
uint32_t toNumRows = 0, toNumCols = 0;
const bool isMat =
isMxNMatrix(toIntType, &toElemType, &toNumRows, &toNumCols);
assert(isMat && numRows == toNumRows && numCols == toNumCols);
(void)isMat;
(void)toNumRows;
(void)toNumCols;
// Casting to a matrix of integers: Cast each row and construct a
// composite.
llvm::SmallVector<SpirvInstruction *, 4> castedRows;
const QualType vecType = getComponentVectorType(astContext, fromType);
const auto fromVecQualType =
astContext.getExtVectorType(elemType, numCols);
const auto toIntVecQualType =
astContext.getExtVectorType(toElemType, numCols);
for (uint32_t row = 0; row < numRows; ++row) {
auto *rowId = spvBuilder.createCompositeExtract(vecType, fromVal, {row},
srcLoc, srcRange);
castedRows.push_back(castToInt(rowId, fromVecQualType, toIntVecQualType,
srcLoc, srcRange));
}
return spvBuilder.createCompositeConstruct(toIntType, castedRows, srcLoc,
srcRange);
}
}
if (const auto *recordType = fromType->getAs<RecordType>()) {
// This code is bogus but approximates the current (unspec'd)
// behavior for the DXIL target.
assert(recordType->isStructureType());
auto fieldDecl = recordType->getDecl()->field_begin();
QualType fieldType = fieldDecl->getType();
QualType elemType = {};
SpirvInstruction *firstField;
if (isVectorType(fieldType, &elemType)) {
fieldType = elemType;
firstField = spvBuilder.createCompositeExtract(fieldType, fromVal, {0, 0},
srcLoc, srcRange);
} else {
firstField = spvBuilder.createCompositeExtract(fieldType, fromVal, {0},
srcLoc, srcRange);
if (fieldDecl->isBitField()) {
firstField = spvBuilder.createBitFieldExtract(
fieldType, firstField, 0, fieldDecl->getBitWidthValue(astContext),
srcLoc, srcRange);
}
}
SpirvInstruction *result =
castToInt(firstField, fieldType, toIntType, srcLoc, srcRange);
result->setLayoutRule(fromVal->getLayoutRule());
return result;
}
emitError("casting from given type to integer unimplemented", srcLoc);
return nullptr;
}
SpirvInstruction *
SpirvEmitter::convertBitwidth(SpirvInstruction *fromVal, SourceLocation loc,
QualType fromType, QualType toType,
QualType *resultType, SourceRange range) {
// At the moment, we will not make bitwidth conversions to/from literal int
// and literal float types because they do not represent the intended SPIR-V
// bitwidth.
if (isLitTypeOrVecOfLitType(fromType) || isLitTypeOrVecOfLitType(toType))
return fromVal;
const auto fromBitwidth = getElementSpirvBitwidth(
astContext, fromType, spirvOptions.enable16BitTypes);
const auto toBitwidth = getElementSpirvBitwidth(
astContext, toType, spirvOptions.enable16BitTypes);
if (fromBitwidth == toBitwidth) {
if (resultType)
*resultType = fromType;
return fromVal;
}
// We want the 'fromType' with the 'toBitwidth'.
const QualType targetType =
getTypeWithCustomBitwidth(astContext, fromType, toBitwidth);
if (resultType)
*resultType = targetType;
if (isFloatOrVecOfFloatType(fromType))
return spvBuilder.createUnaryOp(spv::Op::OpFConvert, targetType, fromVal,
loc, range);
if (isSintOrVecOfSintType(fromType))
return spvBuilder.createUnaryOp(spv::Op::OpSConvert, targetType, fromVal,
loc, range);
if (isUintOrVecOfUintType(fromType))
return spvBuilder.createUnaryOp(spv::Op::OpUConvert, targetType, fromVal,
loc, range);
llvm_unreachable("invalid type passed to convertBitwidth");
}
SpirvInstruction *SpirvEmitter::castToFloat(SpirvInstruction *fromVal,
QualType fromType,
QualType toFloatType,
SourceLocation srcLoc,
SourceRange range) {
if (isSameType(astContext, fromType, toFloatType))
return fromVal;
if (isBoolOrVecOfBoolType(fromType)) {
auto *one = getValueOne(toFloatType);
auto *zero = getValueZero(toFloatType);
return spvBuilder.createSelect(toFloatType, fromVal, one, zero, srcLoc,
range);
}
if (isSintOrVecOfSintType(fromType)) {
// First convert the source to the bitwidth of the destination if necessary.
fromVal =
convertBitwidth(fromVal, srcLoc, fromType, toFloatType, nullptr, range);
return spvBuilder.createUnaryOp(spv::Op::OpConvertSToF, toFloatType,
fromVal, srcLoc, range);
}
if (isUintOrVecOfUintType(fromType)) {
// First convert the source to the bitwidth of the destination if necessary.
fromVal = convertBitwidth(fromVal, srcLoc, fromType, toFloatType);
return spvBuilder.createUnaryOp(spv::Op::OpConvertUToF, toFloatType,
fromVal, srcLoc, range);
}
if (isFloatOrVecOfFloatType(fromType)) {
// This is the case of float to float conversion with different bitwidths.
return convertBitwidth(fromVal, srcLoc, fromType, toFloatType, nullptr,
range);
}
// Casting matrix types
{
QualType elemType = {};
uint32_t numRows = 0, numCols = 0;
if (isMxNMatrix(fromType, &elemType, &numRows, &numCols)) {
// The source matrix and the target matrix must have the same dimensions.
QualType toElemType = {};
uint32_t toNumRows = 0, toNumCols = 0;
const auto isMat =
isMxNMatrix(toFloatType, &toElemType, &toNumRows, &toNumCols);
assert(isMat && numRows == toNumRows && numCols == toNumCols);
(void)isMat;
(void)toNumRows;
(void)toNumCols;
// Casting to a matrix of floats: Cast each row and construct a
// composite.
llvm::SmallVector<SpirvInstruction *, 4> castedRows;
const QualType vecType = getComponentVectorType(astContext, fromType);
const auto fromVecQualType =
astContext.getExtVectorType(elemType, numCols);
const auto toFloatVecQualType =
astContext.getExtVectorType(toElemType, numCols);
for (uint32_t row = 0; row < numRows; ++row) {
auto *rowId = spvBuilder.createCompositeExtract(vecType, fromVal, {row},
srcLoc, range);
castedRows.push_back(castToFloat(rowId, fromVecQualType,
toFloatVecQualType, srcLoc, range));
}
return spvBuilder.createCompositeConstruct(toFloatType, castedRows,
srcLoc, range);
}
}
emitError("casting to floating point unimplemented", srcLoc);
return nullptr;
}
SpirvInstruction *
SpirvEmitter::processIntrinsicCallExpr(const CallExpr *callExpr) {
const FunctionDecl *callee = callExpr->getDirectCallee();
const SourceLocation srcLoc = callExpr->getExprLoc();
const SourceRange srcRange = callExpr->getSourceRange();
assert(hlsl::IsIntrinsicOp(callee) &&
"doIntrinsicCallExpr was called for a non-intrinsic function.");
const bool isFloatType = isFloatOrVecMatOfFloatType(callExpr->getType());
const bool isSintType = isSintOrVecMatOfSintType(callExpr->getType());
// Figure out which intrinsic function to translate.
llvm::StringRef group;
uint32_t opcode = static_cast<uint32_t>(hlsl::IntrinsicOp::Num_Intrinsics);
hlsl::GetIntrinsicOp(callee, opcode, group);
GLSLstd450 glslOpcode = GLSLstd450Bad;
SpirvInstruction *retVal = nullptr;
#define INTRINSIC_SPIRV_OP_CASE(intrinsicOp, spirvOp, doEachVec) \
case hlsl::IntrinsicOp::IOP_##intrinsicOp: { \
retVal = processIntrinsicUsingSpirvInst(callExpr, spv::Op::Op##spirvOp, \
doEachVec); \
} break
#define INTRINSIC_OP_CASE(intrinsicOp, glslOp, doEachVec) \
case hlsl::IntrinsicOp::IOP_##intrinsicOp: { \
glslOpcode = GLSLstd450::GLSLstd450##glslOp; \
retVal = processIntrinsicUsingGLSLInst(callExpr, glslOpcode, doEachVec, \
srcLoc, srcRange); \
} break
#define INTRINSIC_OP_CASE_INT_FLOAT(intrinsicOp, glslIntOp, glslFloatOp, \
doEachVec) \
case hlsl::IntrinsicOp::IOP_##intrinsicOp: { \
glslOpcode = isFloatType ? GLSLstd450::GLSLstd450##glslFloatOp \
: GLSLstd450::GLSLstd450##glslIntOp; \
retVal = processIntrinsicUsingGLSLInst(callExpr, glslOpcode, doEachVec, \
srcLoc, srcRange); \
} break
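// In the macros above, doEachVec requests per-row processing of matrix
// operands, since the GLSL extended instructions only accept scalar and
// vector operands.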
switch (const auto hlslOpcode = static_cast<hlsl::IntrinsicOp>(opcode)) {
case hlsl::IntrinsicOp::IOP_InterlockedAdd:
case hlsl::IntrinsicOp::IOP_InterlockedAnd:
case hlsl::IntrinsicOp::IOP_InterlockedMax:
case hlsl::IntrinsicOp::IOP_InterlockedUMax:
case hlsl::IntrinsicOp::IOP_InterlockedMin:
case hlsl::IntrinsicOp::IOP_InterlockedUMin:
case hlsl::IntrinsicOp::IOP_InterlockedOr:
case hlsl::IntrinsicOp::IOP_InterlockedXor:
case hlsl::IntrinsicOp::IOP_InterlockedExchange:
case hlsl::IntrinsicOp::IOP_InterlockedCompareStore:
case hlsl::IntrinsicOp::IOP_InterlockedCompareExchange:
retVal = processIntrinsicInterlockedMethod(callExpr, hlslOpcode);
break;
case hlsl::IntrinsicOp::IOP_NonUniformResourceIndex:
retVal = processIntrinsicNonUniformResourceIndex(callExpr);
break;
case hlsl::IntrinsicOp::IOP_tex1D:
case hlsl::IntrinsicOp::IOP_tex1Dbias:
case hlsl::IntrinsicOp::IOP_tex1Dgrad:
case hlsl::IntrinsicOp::IOP_tex1Dlod:
case hlsl::IntrinsicOp::IOP_tex1Dproj:
case hlsl::IntrinsicOp::IOP_tex2D:
case hlsl::IntrinsicOp::IOP_tex2Dbias:
case hlsl::IntrinsicOp::IOP_tex2Dgrad:
case hlsl::IntrinsicOp::IOP_tex2Dlod:
case hlsl::IntrinsicOp::IOP_tex2Dproj:
case hlsl::IntrinsicOp::IOP_tex3D:
case hlsl::IntrinsicOp::IOP_tex3Dbias:
case hlsl::IntrinsicOp::IOP_tex3Dgrad:
case hlsl::IntrinsicOp::IOP_tex3Dlod:
case hlsl::IntrinsicOp::IOP_tex3Dproj:
case hlsl::IntrinsicOp::IOP_texCUBE:
case hlsl::IntrinsicOp::IOP_texCUBEbias:
case hlsl::IntrinsicOp::IOP_texCUBEgrad:
case hlsl::IntrinsicOp::IOP_texCUBElod:
case hlsl::IntrinsicOp::IOP_texCUBEproj: {
emitError("deprecated %0 intrinsic function will not be supported", srcLoc)
<< getFunctionOrOperatorName(callee, true);
return nullptr;
}
case hlsl::IntrinsicOp::IOP_dot:
retVal = processIntrinsicDot(callExpr);
break;
case hlsl::IntrinsicOp::IOP_GroupMemoryBarrier:
retVal = processIntrinsicMemoryBarrier(callExpr,
/*isDevice*/ false,
/*groupSync*/ false,
/*isAllBarrier*/ false);
break;
case hlsl::IntrinsicOp::IOP_GroupMemoryBarrierWithGroupSync:
retVal = processIntrinsicMemoryBarrier(callExpr,
/*isDevice*/ false,
/*groupSync*/ true,
/*isAllBarrier*/ false);
break;
case hlsl::IntrinsicOp::IOP_DeviceMemoryBarrier:
retVal = processIntrinsicMemoryBarrier(callExpr, /*isDevice*/ true,
/*groupSync*/ false,
/*isAllBarrier*/ false);
break;
case hlsl::IntrinsicOp::IOP_DeviceMemoryBarrierWithGroupSync:
retVal = processIntrinsicMemoryBarrier(callExpr, /*isDevice*/ true,
/*groupSync*/ true,
/*isAllBarrier*/ false);
break;
case hlsl::IntrinsicOp::IOP_AllMemoryBarrier:
retVal = processIntrinsicMemoryBarrier(callExpr, /*isDevice*/ true,
/*groupSync*/ false,
/*isAllBarrier*/ true);
break;
case hlsl::IntrinsicOp::IOP_AllMemoryBarrierWithGroupSync:
retVal = processIntrinsicMemoryBarrier(callExpr, /*isDevice*/ true,
/*groupSync*/ true,
/*isAllBarrier*/ true);
break;
case hlsl::IntrinsicOp::IOP_CheckAccessFullyMapped:
retVal = spvBuilder.createImageSparseTexelsResident(
doExpr(callExpr->getArg(0)), srcLoc, srcRange);
break;
case hlsl::IntrinsicOp::IOP_mul:
case hlsl::IntrinsicOp::IOP_umul:
retVal = processIntrinsicMul(callExpr);
break;
case hlsl::IntrinsicOp::IOP_all:
retVal = processIntrinsicAllOrAny(callExpr, spv::Op::OpAll);
break;
case hlsl::IntrinsicOp::IOP_any:
retVal = processIntrinsicAllOrAny(callExpr, spv::Op::OpAny);
break;
case hlsl::IntrinsicOp::IOP_asdouble:
case hlsl::IntrinsicOp::IOP_asfloat:
case hlsl::IntrinsicOp::IOP_asfloat16:
case hlsl::IntrinsicOp::IOP_asint:
case hlsl::IntrinsicOp::IOP_asint16:
case hlsl::IntrinsicOp::IOP_asuint:
case hlsl::IntrinsicOp::IOP_asuint16:
retVal = processIntrinsicAsType(callExpr);
break;
case hlsl::IntrinsicOp::IOP_clip:
retVal = processIntrinsicClip(callExpr);
break;
case hlsl::IntrinsicOp::IOP_dst:
retVal = processIntrinsicDst(callExpr);
break;
case hlsl::IntrinsicOp::IOP_clamp:
case hlsl::IntrinsicOp::IOP_uclamp:
retVal = processIntrinsicClamp(callExpr);
break;
case hlsl::IntrinsicOp::IOP_frexp:
retVal = processIntrinsicFrexp(callExpr);
break;
case hlsl::IntrinsicOp::IOP_ldexp:
retVal = processIntrinsicLdexp(callExpr);
break;
case hlsl::IntrinsicOp::IOP_lit:
retVal = processIntrinsicLit(callExpr);
break;
case hlsl::IntrinsicOp::IOP_mad:
case hlsl::IntrinsicOp::IOP_umad:
retVal = processIntrinsicMad(callExpr);
break;
case hlsl::IntrinsicOp::IOP_modf:
retVal = processIntrinsicModf(callExpr);
break;
case hlsl::IntrinsicOp::IOP_msad4:
retVal = processIntrinsicMsad4(callExpr);
break;
case hlsl::IntrinsicOp::IOP_printf:
retVal = processIntrinsicPrintf(callExpr);
break;
case hlsl::IntrinsicOp::IOP_sign: {
if (isFloatOrVecMatOfFloatType(callExpr->getArg(0)->getType()))
retVal = processIntrinsicFloatSign(callExpr);
else
retVal = processIntrinsicUsingGLSLInst(
callExpr, GLSLstd450::GLSLstd450SSign,
/*actPerRowForMatrices*/ true, srcLoc, srcRange);
} break;
case hlsl::IntrinsicOp::IOP_D3DCOLORtoUBYTE4:
retVal = processD3DCOLORtoUBYTE4(callExpr);
break;
case hlsl::IntrinsicOp::IOP_isfinite:
retVal = processIntrinsicIsFinite(callExpr);
break;
case hlsl::IntrinsicOp::IOP_sincos:
retVal = processIntrinsicSinCos(callExpr);
break;
case hlsl::IntrinsicOp::IOP_rcp:
retVal = processIntrinsicRcp(callExpr);
break;
case hlsl::IntrinsicOp::IOP_VkReadClock:
retVal = processIntrinsicReadClock(callExpr);
break;
case hlsl::IntrinsicOp::IOP_VkRawBufferLoad:
retVal = processRawBufferLoad(callExpr);
break;
case hlsl::IntrinsicOp::IOP_VkRawBufferStore:
retVal = processRawBufferStore(callExpr);
break;
case hlsl::IntrinsicOp::IOP_Vkext_execution_mode:
retVal = processIntrinsicExecutionMode(callExpr, false);
break;
case hlsl::IntrinsicOp::IOP_Vkext_execution_mode_id:
retVal = processIntrinsicExecutionMode(callExpr, true);
break;
case hlsl::IntrinsicOp::IOP_saturate:
retVal = processIntrinsicSaturate(callExpr);
break;
case hlsl::IntrinsicOp::IOP_log10:
retVal = processIntrinsicLog10(callExpr);
break;
case hlsl::IntrinsicOp::IOP_f16tof32:
retVal = processIntrinsicF16ToF32(callExpr);
break;
case hlsl::IntrinsicOp::IOP_f32tof16:
retVal = processIntrinsicF32ToF16(callExpr);
break;
case hlsl::IntrinsicOp::IOP_WaveGetLaneCount: {
featureManager.requestTargetEnv(SPV_ENV_VULKAN_1_1, "WaveGetLaneCount",
srcLoc);
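// The SubgroupSize built-in used below requires SPIR-V 1.3, hence the
// Vulkan 1.1 target environment requested above.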
const QualType retType = callExpr->getCallReturnType(astContext);
auto *var =
declIdMapper.getBuiltinVar(spv::BuiltIn::SubgroupSize, retType, srcLoc);
retVal = spvBuilder.createLoad(retType, var, srcLoc, srcRange);
needsLegalization = true;
} break;
case hlsl::IntrinsicOp::IOP_WaveGetLaneIndex: {
featureManager.requestTargetEnv(SPV_ENV_VULKAN_1_1, "WaveGetLaneIndex",
srcLoc);
const QualType retType = callExpr->getCallReturnType(astContext);
auto *var = declIdMapper.getBuiltinVar(
spv::BuiltIn::SubgroupLocalInvocationId, retType, srcLoc);
retVal = spvBuilder.createLoad(retType, var, srcLoc, srcRange);
needsLegalization = true;
} break;
case hlsl::IntrinsicOp::IOP_IsHelperLane:
retVal = processIsHelperLane(callExpr, srcLoc, srcRange);
break;
case hlsl::IntrinsicOp::IOP_WaveIsFirstLane:
retVal = processWaveQuery(callExpr, spv::Op::OpGroupNonUniformElect);
break;
case hlsl::IntrinsicOp::IOP_WaveActiveAllTrue:
retVal = processWaveVote(callExpr, spv::Op::OpGroupNonUniformAll);
break;
case hlsl::IntrinsicOp::IOP_WaveActiveAnyTrue:
retVal = processWaveVote(callExpr, spv::Op::OpGroupNonUniformAny);
break;
case hlsl::IntrinsicOp::IOP_WaveActiveBallot:
retVal = processWaveVote(callExpr, spv::Op::OpGroupNonUniformBallot);
break;
case hlsl::IntrinsicOp::IOP_WaveActiveAllEqual:
retVal = processWaveActiveAllEqual(callExpr);
break;
case hlsl::IntrinsicOp::IOP_WaveActiveCountBits:
retVal = processWaveCountBits(callExpr, spv::GroupOperation::Reduce);
break;
case hlsl::IntrinsicOp::IOP_WaveActiveUSum:
case hlsl::IntrinsicOp::IOP_WaveActiveSum:
case hlsl::IntrinsicOp::IOP_WaveActiveUProduct:
case hlsl::IntrinsicOp::IOP_WaveActiveProduct:
case hlsl::IntrinsicOp::IOP_WaveActiveUMax:
case hlsl::IntrinsicOp::IOP_WaveActiveMax:
case hlsl::IntrinsicOp::IOP_WaveActiveUMin:
case hlsl::IntrinsicOp::IOP_WaveActiveMin:
case hlsl::IntrinsicOp::IOP_WaveActiveBitAnd:
case hlsl::IntrinsicOp::IOP_WaveActiveBitOr:
case hlsl::IntrinsicOp::IOP_WaveActiveBitXor: {
const auto retType = callExpr->getCallReturnType(astContext);
retVal = processWaveReductionOrPrefix(
callExpr, translateWaveOp(hlslOpcode, retType, srcLoc),
spv::GroupOperation::Reduce);
} break;
case hlsl::IntrinsicOp::IOP_WavePrefixUSum:
case hlsl::IntrinsicOp::IOP_WavePrefixSum:
case hlsl::IntrinsicOp::IOP_WavePrefixUProduct:
case hlsl::IntrinsicOp::IOP_WavePrefixProduct: {
const auto retType = callExpr->getCallReturnType(astContext);
retVal = processWaveReductionOrPrefix(
callExpr, translateWaveOp(hlslOpcode, retType, srcLoc),
spv::GroupOperation::ExclusiveScan);
} break;
case hlsl::IntrinsicOp::IOP_WaveMultiPrefixUSum:
case hlsl::IntrinsicOp::IOP_WaveMultiPrefixSum:
case hlsl::IntrinsicOp::IOP_WaveMultiPrefixUProduct:
case hlsl::IntrinsicOp::IOP_WaveMultiPrefixProduct:
case hlsl::IntrinsicOp::IOP_WaveMultiPrefixBitAnd:
case hlsl::IntrinsicOp::IOP_WaveMultiPrefixBitOr:
case hlsl::IntrinsicOp::IOP_WaveMultiPrefixBitXor: {
const auto retType = callExpr->getCallReturnType(astContext);
retVal = processWaveReductionOrPrefix(
callExpr, translateWaveOp(hlslOpcode, retType, srcLoc),
spv::GroupOperation::PartitionedExclusiveScanNV);
} break;
case hlsl::IntrinsicOp::IOP_WavePrefixCountBits:
retVal = processWaveCountBits(callExpr, spv::GroupOperation::ExclusiveScan);
break;
case hlsl::IntrinsicOp::IOP_WaveReadLaneAt:
case hlsl::IntrinsicOp::IOP_WaveReadLaneFirst:
retVal = processWaveBroadcast(callExpr);
break;
case hlsl::IntrinsicOp::IOP_WaveMatch:
retVal = processWaveMatch(callExpr);
break;
case hlsl::IntrinsicOp::IOP_QuadReadAcrossX:
case hlsl::IntrinsicOp::IOP_QuadReadAcrossY:
case hlsl::IntrinsicOp::IOP_QuadReadAcrossDiagonal:
case hlsl::IntrinsicOp::IOP_QuadReadLaneAt:
retVal = processWaveQuadWideShuffle(callExpr, hlslOpcode);
break;
case hlsl::IntrinsicOp::IOP_abort:
case hlsl::IntrinsicOp::IOP_GetRenderTargetSampleCount:
case hlsl::IntrinsicOp::IOP_GetRenderTargetSamplePosition: {
emitError("no equivalent for %0 intrinsic function in Vulkan", srcLoc)
<< getFunctionOrOperatorName(callee, true);
return nullptr;
}
case hlsl::IntrinsicOp::IOP_transpose: {
const Expr *mat = callExpr->getArg(0);
const QualType matType = mat->getType();
if (isVectorType(matType) || isScalarType(matType)) {
// A 1xN or Nx1 or 1x1 matrix is a SPIR-V vector/scalar, and its transpose
// is the vector/scalar itself.
retVal = doExpr(mat);
} else {
if (hlsl::GetHLSLMatElementType(matType)->isFloatingType())
retVal = processIntrinsicUsingSpirvInst(callExpr, spv::Op::OpTranspose,
false);
else
retVal =
processNonFpMatrixTranspose(matType, doExpr(mat), srcLoc, srcRange);
}
break;
}
case hlsl::IntrinsicOp::IOP_dot4add_i8packed:
case hlsl::IntrinsicOp::IOP_dot4add_u8packed: {
retVal = processIntrinsicDP4a(callExpr, hlslOpcode);
break;
}
case hlsl::IntrinsicOp::IOP_dot2add: {
retVal = processIntrinsicDP2a(callExpr);
break;
}
case hlsl::IntrinsicOp::IOP_pack_s8:
case hlsl::IntrinsicOp::IOP_pack_u8:
case hlsl::IntrinsicOp::IOP_pack_clamp_s8:
case hlsl::IntrinsicOp::IOP_pack_clamp_u8: {
retVal = processIntrinsic8BitPack(callExpr, hlslOpcode);
break;
}
case hlsl::IntrinsicOp::IOP_unpack_s8s16:
case hlsl::IntrinsicOp::IOP_unpack_s8s32:
case hlsl::IntrinsicOp::IOP_unpack_u8u16:
case hlsl::IntrinsicOp::IOP_unpack_u8u32: {
retVal = processIntrinsic8BitUnpack(callExpr, hlslOpcode);
break;
}
// DXR raytracing intrinsics
case hlsl::IntrinsicOp::IOP_DispatchRaysDimensions:
case hlsl::IntrinsicOp::IOP_DispatchRaysIndex:
case hlsl::IntrinsicOp::IOP_GeometryIndex:
case hlsl::IntrinsicOp::IOP_HitKind:
case hlsl::IntrinsicOp::IOP_InstanceIndex:
case hlsl::IntrinsicOp::IOP_InstanceID:
case hlsl::IntrinsicOp::IOP_ObjectRayDirection:
case hlsl::IntrinsicOp::IOP_ObjectRayOrigin:
case hlsl::IntrinsicOp::IOP_ObjectToWorld3x4:
case hlsl::IntrinsicOp::IOP_ObjectToWorld4x3:
case hlsl::IntrinsicOp::IOP_PrimitiveIndex:
case hlsl::IntrinsicOp::IOP_RayFlags:
case hlsl::IntrinsicOp::IOP_RayTCurrent:
case hlsl::IntrinsicOp::IOP_RayTMin:
case hlsl::IntrinsicOp::IOP_WorldRayDirection:
case hlsl::IntrinsicOp::IOP_WorldRayOrigin:
case hlsl::IntrinsicOp::IOP_WorldToObject3x4:
case hlsl::IntrinsicOp::IOP_WorldToObject4x3: {
retVal = processRayBuiltins(callExpr, hlslOpcode);
break;
}
case hlsl::IntrinsicOp::IOP_AcceptHitAndEndSearch:
case hlsl::IntrinsicOp::IOP_IgnoreHit: {
    // Any modifications made to the ray payload in an any-hit shader are
    // preserved across AcceptHitAndEndSearch/IgnoreHit, so write out the
    // results to the payload, which is visible only in entry functions.
const auto iter = functionInfoMap.find(curFunction);
if (iter != functionInfoMap.end()) {
const auto &entryInfo = iter->second;
if (entryInfo->isEntryFunction) {
const auto payloadArg = curFunction->getParamDecl(0);
const auto payloadArgInst =
declIdMapper.getDeclEvalInfo(payloadArg, payloadArg->getLocStart());
auto tempLoad = spvBuilder.createLoad(
payloadArg->getType(), payloadArgInst, payloadArg->getLocStart());
spvBuilder.createStore(currentRayPayload, tempLoad,
callExpr->getExprLoc());
}
}
bool nvRayTracing =
featureManager.isExtensionEnabled(Extension::NV_ray_tracing);
if (nvRayTracing) {
spvBuilder.createRayTracingOpsNV(
hlslOpcode == hlsl::IntrinsicOp::IOP_AcceptHitAndEndSearch
? spv::Op::OpTerminateRayNV
: spv::Op::OpIgnoreIntersectionNV,
QualType(), {}, srcLoc);
} else {
spvBuilder.createRaytracingTerminateKHR(
hlslOpcode == hlsl::IntrinsicOp::IOP_AcceptHitAndEndSearch
? spv::Op::OpTerminateRayKHR
: spv::Op::OpIgnoreIntersectionKHR,
srcLoc);
      // According to the SPIR-V spec, both OpTerminateRayKHR and
      // OpIgnoreIntersectionKHR are termination instructions, and must be
      // the last instruction in a block. Therefore we need to create a new
      // basic block; the following instructions will go there.
auto *newBB = spvBuilder.createBasicBlock();
spvBuilder.setInsertPoint(newBB);
}
break;
}
case hlsl::IntrinsicOp::IOP_ReportHit: {
retVal = processReportHit(callExpr);
break;
}
case hlsl::IntrinsicOp::IOP_TraceRay: {
processTraceRay(callExpr);
break;
}
case hlsl::IntrinsicOp::IOP_CallShader: {
processCallShader(callExpr);
break;
}
case hlsl::IntrinsicOp::IOP_DispatchMesh: {
processDispatchMesh(callExpr);
break;
}
case hlsl::IntrinsicOp::IOP_SetMeshOutputCounts: {
processMeshOutputCounts(callExpr);
break;
}
case hlsl::IntrinsicOp::IOP_select: {
const Expr *cond = callExpr->getArg(0);
const Expr *trueExpr = callExpr->getArg(1);
const Expr *falseExpr = callExpr->getArg(2);
retVal = doConditional(callExpr, cond, falseExpr, trueExpr);
break;
}
case hlsl::IntrinsicOp::IOP_min: {
glslOpcode =
isFloatType ? (spirvOptions.finiteMathOnly ? GLSLstd450::GLSLstd450FMin
: GLSLstd450::GLSLstd450NMin)
: isSintType ? GLSLstd450::GLSLstd450SMin
: GLSLstd450::GLSLstd450UMin;
retVal = processIntrinsicUsingGLSLInst(callExpr, glslOpcode, true, srcLoc,
srcRange);
break;
}
case hlsl::IntrinsicOp::IOP_max: {
glslOpcode =
isFloatType ? (spirvOptions.finiteMathOnly ? GLSLstd450::GLSLstd450FMax
: GLSLstd450::GLSLstd450NMax)
: isSintType ? GLSLstd450::GLSLstd450SMax
: GLSLstd450::GLSLstd450UMax;
retVal = processIntrinsicUsingGLSLInst(callExpr, glslOpcode, true, srcLoc,
srcRange);
break;
}
case hlsl::IntrinsicOp::IOP_GetAttributeAtVertex: {
retVal = processGetAttributeAtVertex(callExpr);
break;
}
case hlsl::IntrinsicOp::IOP_ufirstbithigh: {
retVal = processIntrinsicFirstbit(callExpr, GLSLstd450::GLSLstd450FindUMsb);
break;
}
case hlsl::IntrinsicOp::IOP_firstbithigh: {
retVal = processIntrinsicFirstbit(callExpr, GLSLstd450::GLSLstd450FindSMsb);
break;
}
case hlsl::IntrinsicOp::IOP_firstbitlow: {
retVal = processIntrinsicFirstbit(callExpr, GLSLstd450::GLSLstd450FindILsb);
break;
}
case hlsl::IntrinsicOp::IOP_isnan: {
retVal = processIntrinsicUsingSpirvInst(callExpr, spv::Op::OpIsNan,
/* doEachVec= */ true);
// OpIsNan returns a bool/vec<bool>, so the only valid layout is void. It
// will be the responsibility of the store to do an OpSelect and correctly
// convert this type to an externally storable type.
retVal->setLayoutRule(SpirvLayoutRule::Void);
break;
}
INTRINSIC_SPIRV_OP_CASE(ddx, DPdx, true);
INTRINSIC_SPIRV_OP_CASE(ddx_coarse, DPdxCoarse, false);
INTRINSIC_SPIRV_OP_CASE(ddx_fine, DPdxFine, false);
INTRINSIC_SPIRV_OP_CASE(ddy, DPdy, true);
INTRINSIC_SPIRV_OP_CASE(ddy_coarse, DPdyCoarse, false);
INTRINSIC_SPIRV_OP_CASE(ddy_fine, DPdyFine, false);
INTRINSIC_SPIRV_OP_CASE(countbits, BitCount, false);
INTRINSIC_SPIRV_OP_CASE(isinf, IsInf, true);
INTRINSIC_SPIRV_OP_CASE(fmod, FRem, true);
INTRINSIC_SPIRV_OP_CASE(fwidth, Fwidth, true);
INTRINSIC_SPIRV_OP_CASE(reversebits, BitReverse, false);
INTRINSIC_SPIRV_OP_CASE(and, LogicalAnd, false);
INTRINSIC_SPIRV_OP_CASE(or, LogicalOr, false);
INTRINSIC_OP_CASE(round, RoundEven, true);
INTRINSIC_OP_CASE(uabs, SAbs, true);
INTRINSIC_OP_CASE_INT_FLOAT(abs, SAbs, FAbs, true);
INTRINSIC_OP_CASE(acos, Acos, true);
INTRINSIC_OP_CASE(asin, Asin, true);
INTRINSIC_OP_CASE(atan, Atan, true);
INTRINSIC_OP_CASE(atan2, Atan2, true);
INTRINSIC_OP_CASE(ceil, Ceil, true);
INTRINSIC_OP_CASE(cos, Cos, true);
INTRINSIC_OP_CASE(cosh, Cosh, true);
INTRINSIC_OP_CASE(cross, Cross, false);
INTRINSIC_OP_CASE(degrees, Degrees, true);
INTRINSIC_OP_CASE(distance, Distance, false);
INTRINSIC_OP_CASE(determinant, Determinant, false);
INTRINSIC_OP_CASE(exp, Exp, true);
INTRINSIC_OP_CASE(exp2, Exp2, true);
INTRINSIC_OP_CASE(faceforward, FaceForward, false);
INTRINSIC_OP_CASE(floor, Floor, true);
INTRINSIC_OP_CASE(fma, Fma, true);
INTRINSIC_OP_CASE(frac, Fract, true);
INTRINSIC_OP_CASE(length, Length, false);
INTRINSIC_OP_CASE(lerp, FMix, true);
INTRINSIC_OP_CASE(log, Log, true);
INTRINSIC_OP_CASE(log2, Log2, true);
INTRINSIC_OP_CASE(umax, UMax, true);
INTRINSIC_OP_CASE(umin, UMin, true);
INTRINSIC_OP_CASE(normalize, Normalize, false);
INTRINSIC_OP_CASE(pow, Pow, true);
INTRINSIC_OP_CASE(radians, Radians, true);
INTRINSIC_OP_CASE(reflect, Reflect, false);
INTRINSIC_OP_CASE(refract, Refract, false);
INTRINSIC_OP_CASE(rsqrt, InverseSqrt, true);
INTRINSIC_OP_CASE(smoothstep, SmoothStep, true);
INTRINSIC_OP_CASE(step, Step, true);
INTRINSIC_OP_CASE(sin, Sin, true);
INTRINSIC_OP_CASE(sinh, Sinh, true);
INTRINSIC_OP_CASE(tan, Tan, true);
INTRINSIC_OP_CASE(tanh, Tanh, true);
INTRINSIC_OP_CASE(sqrt, Sqrt, true);
INTRINSIC_OP_CASE(trunc, Trunc, true);
default:
emitError("%0 intrinsic function unimplemented", srcLoc)
<< getFunctionOrOperatorName(callee, true);
return 0;
}
#undef INTRINSIC_OP_CASE
#undef INTRINSIC_OP_CASE_INT_FLOAT
if (retVal)
retVal->setRValue();
return retVal;
}
SpirvInstruction *
SpirvEmitter::processIntrinsicFirstbit(const CallExpr *callExpr,
GLSLstd450 glslOpcode) {
const FunctionDecl *callee = callExpr->getDirectCallee();
const SourceLocation srcLoc = callExpr->getExprLoc();
const SourceRange srcRange = callExpr->getSourceRange();
const QualType argType = callExpr->getArg(0)->getType();
const uint32_t bitwidth = getElementSpirvBitwidth(
astContext, argType, spirvOptions.enable16BitTypes);
if (bitwidth != 32) {
emitError("%0 is currently limited to 32-bit width components when "
"targetting SPIR-V",
srcLoc)
<< getFunctionOrOperatorName(callee, true);
return nullptr;
}
return processIntrinsicUsingGLSLInst(callExpr, glslOpcode, false, srcLoc,
srcRange);
}
// Returns true if the given expression can be used as an output parameter.
//
// Warning: this function could return false negatives.
// HLSL has no references, and the AST handling of lvalues and rvalues is not
// to be trusted for this usage:
// - output variables can be passed but marked as rvalues.
// - rvalues can become lvalues due to an operator call.
// This means we need to walk the expression, and explicitly allow some cases.
// I might have missed a valid use-case, hence the risk of false negatives.
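//
// For example, a local variable passed as the 'original_value' argument of
// InterlockedCompareExchange is accepted, while a literal or other pure
// rvalue in that position is rejected.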
bool isValidOutputArgument(const Expr *expr) {
// This could be either a member from an R-value, or an L-value. Checking
// struct.
if (const MemberExpr *member = dyn_cast<MemberExpr>(expr))
return isValidOutputArgument(member->getBase());
// This could be either a subscript into an R-value, or an L-value. Checking
// array.
if (const ArraySubscriptExpr *item = dyn_cast<ArraySubscriptExpr>(expr))
return isValidOutputArgument(item->getBase());
// Accessor to HLSL vectors.
if (const HLSLVectorElementExpr *vec = dyn_cast<HLSLVectorElementExpr>(expr))
return isValidOutputArgument(vec->getBase());
// Going through implicit casts.
if (const ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(expr))
return isValidOutputArgument(cast->getSubExpr());
  // For call operators, we trust the isLValue() method.
  // We haven't found a case where this is not true.
if (const CXXOperatorCallExpr *call = dyn_cast<CXXOperatorCallExpr>(expr))
return call->isLValue();
if (const CallExpr *call = dyn_cast<CallExpr>(expr))
return call->isLValue();
// If we have a declaration, only accept l-values/variables.
if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(expr)) {
if (!ref->isLValue())
return false;
return dyn_cast<VarDecl>(ref->getDecl()) != nullptr;
}
return false;
}
SpirvInstruction *
SpirvEmitter::processIntrinsicInterlockedMethod(const CallExpr *expr,
hlsl::IntrinsicOp opcode) {
// The signature of intrinsic atomic methods are:
// void Interlocked*(in R dest, in T value);
// void Interlocked*(in R dest, in T value, out T original_value);
// Note: ALL Interlocked*() methods are forced to have an unsigned integer
// 'value'. Meaning, T is forced to be 'unsigned int'. If the provided
// parameter is not an unsigned integer, the frontend inserts an
  // 'ImplicitCastExpr' to convert it to unsigned integer. OpAtomicIAdd (and
  // other SPIR-V OpAtomic* instructions) requires the pointee in 'dest' to
  // be of the same type as T. This results in invalid SPIR-V if 'dest'
// is a signed integer typed resource such as RWTexture1D<int>. For example,
// the following OpAtomicIAdd is invalid because the pointee type defined in
// %1 is a signed integer, while the value passed to atomic add (%3) is an
// unsigned integer.
//
// %_ptr_Image_int = OpTypePointer Image %int
// %1 = OpImageTexelPointer %_ptr_Image_int %RWTexture1D_int %index %uint_0
// %2 = OpLoad %int %value
// %3 = OpBitcast %uint %2 <-------- Inserted by the frontend
// %4 = OpAtomicIAdd %int %1 %uint_1 %uint_0 %3
//
// In such cases, we bypass the forced IntegralCast.
// Moreover, the frontend does not add a cast AST node to cast uint to int
// where necessary. To ensure SPIR-V validity, we add that where necessary.
auto *zero =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 0));
const auto *dest = expr->getArg(0);
const auto srcLoc = expr->getExprLoc();
const auto baseType = dest->getType()->getCanonicalTypeUnqualified();
if (!baseType->isIntegerType() && !baseType->isFloatingType()) {
llvm_unreachable("Unexpected type for atomic operation. Expecting a scalar "
"integer or float values");
return nullptr;
}
const auto doArg = [baseType, this](const CallExpr *callExpr,
uint32_t argIndex) {
const Expr *valueExpr = callExpr->getArg(argIndex);
if (const auto *castExpr = dyn_cast<ImplicitCastExpr>(valueExpr))
if (castExpr->getCastKind() == CK_IntegralCast &&
castExpr->getSubExpr()->getType()->getCanonicalTypeUnqualified() ==
baseType)
valueExpr = castExpr->getSubExpr();
auto *argInstr = doExpr(valueExpr);
if (valueExpr->getType() != baseType)
argInstr = castToInt(argInstr, valueExpr->getType(), baseType,
valueExpr->getExprLoc());
return argInstr;
};
const auto writeToOutputArg = [&baseType, dest,
this](SpirvInstruction *toWrite,
const CallExpr *callExpr,
uint32_t outputArgIndex) {
const auto outputArg = callExpr->getArg(outputArgIndex);
if (!isValidOutputArgument(outputArg)) {
emitError(
"InterlockedCompareExchange requires a reference as output parameter",
outputArg->getExprLoc());
return;
}
const auto outputArgType = outputArg->getType();
if (baseType != outputArgType)
toWrite =
castToInt(toWrite, baseType, outputArgType, dest->getLocStart());
spvBuilder.createStore(doExpr(outputArg), toWrite, callExpr->getExprLoc());
};
// If a vector swizzling of a texture is done as an argument of an
// interlocked method, we need to handle the access to the texture
// buffer element correctly. For example:
//
// InterlockedAdd(myRWTexture[index].r, 1);
//
// `-CallExpr
// |-ImplicitCastExpr
// | `-DeclRefExpr Function 'InterlockedAdd'
// | 'void (unsigned int &, unsigned int)'
// |-HLSLVectorElementExpr 'unsigned int' lvalue vectorcomponent r
// | `-ImplicitCastExpr 'vector<uint, 1>':'vector<unsigned int, 1>'
// | <HLSLVectorSplat>
// | `-CXXOperatorCallExpr 'unsigned int' lvalue
const auto *cxxOpCall = dyn_cast<CXXOperatorCallExpr>(dest);
if (const auto *vector = dyn_cast<HLSLVectorElementExpr>(dest)) {
const Expr *base = vector->getBase();
cxxOpCall = dyn_cast<CXXOperatorCallExpr>(base);
if (const auto *cast = dyn_cast<CastExpr>(base)) {
cxxOpCall = dyn_cast<CXXOperatorCallExpr>(cast->getSubExpr());
}
}
// If the argument is indexing into a texture/buffer, we need to create an
// OpImageTexelPointer instruction.
SpirvInstruction *ptr = nullptr;
if (cxxOpCall) {
const Expr *base = nullptr;
const Expr *index = nullptr;
if (isBufferTextureIndexing(cxxOpCall, &base, &index)) {
if (hlsl::IsHLSLResourceType(base->getType())) {
const auto resultTy = hlsl::GetHLSLResourceResultType(base->getType());
if (!isScalarType(resultTy, nullptr)) {
emitError("Interlocked operation for texture buffer whose result "
"type is non-scalar type is not allowed",
dest->getExprLoc());
return nullptr;
}
}
auto *baseInstr = doExpr(base);
if (baseInstr->isRValue()) {
// OpImageTexelPointer's Image argument must have a type of
// OpTypePointer with Type OpTypeImage. Need to create a temporary
// variable if the baseId is an rvalue.
baseInstr =
createTemporaryVar(base->getType(), getAstTypeName(base->getType()),
baseInstr, base->getExprLoc());
}
auto *coordInstr = doExpr(index);
ptr = spvBuilder.createImageTexelPointer(baseType, baseInstr, coordInstr,
zero, srcLoc);
}
}
if (!ptr) {
auto *ptrInfo = doExpr(dest);
const auto sc = ptrInfo->getStorageClass();
if (sc == spv::StorageClass::Private || sc == spv::StorageClass::Function) {
emitError("using static variable or function scope variable in "
"interlocked operation is not allowed",
dest->getExprLoc());
return nullptr;
}
ptr = ptrInfo;
}
// Atomic operations on memory in the Workgroup storage class should also be
// Workgroup scoped. Otherwise, default to Device scope.
spv::Scope scope = ptr->getStorageClass() == spv::StorageClass::Workgroup
? spv::Scope::Workgroup
: spv::Scope::Device;
const bool isCompareExchange =
opcode == hlsl::IntrinsicOp::IOP_InterlockedCompareExchange;
const bool isCompareStore =
opcode == hlsl::IntrinsicOp::IOP_InterlockedCompareStore;
if (isCompareExchange || isCompareStore) {
auto *comparator = doArg(expr, 1);
auto *valueInstr = doArg(expr, 2);
auto *originalVal = spvBuilder.createAtomicCompareExchange(
baseType, ptr, scope, spv::MemorySemanticsMask::MaskNone,
spv::MemorySemanticsMask::MaskNone, valueInstr, comparator, srcLoc);
if (isCompareExchange)
writeToOutputArg(originalVal, expr, 3);
} else {
auto *value = doArg(expr, 1);
spv::Op atomicOp = translateAtomicHlslOpcodeToSpirvOpcode(opcode);
auto *originalVal = spvBuilder.createAtomicOp(
atomicOp, baseType, ptr, scope, spv::MemorySemanticsMask::MaskNone,
value, srcLoc);
if (expr->getNumArgs() > 2)
writeToOutputArg(originalVal, expr, 2);
}
return nullptr;
}
SpirvInstruction *
SpirvEmitter::processIntrinsicNonUniformResourceIndex(const CallExpr *expr) {
auto *index = doExpr(expr->getArg(0));
// Decorate the expression in NonUniformResourceIndex() with NonUniformEXT.
// Aside from this, we also need to eventually populate the NonUniformEXT
// status to the usages of this expression. This is done by the
// NonUniformVisitor class.
//
  // The decoration shouldn't be applied to the operand, but rather to a copy
  // of the result. Even though applying the decoration to the operand may not
  // be
// functionally incorrect (since adding NonUniform is more conservative), it
// could affect performance and isn't the intent of the shader.
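  //
  // For example (hypothetical HLSL):
  //   Texture2D gTextures[] : register(t0);
  //   float4 c = gTextures[NonUniformResourceIndex(idx)].Sample(gSampler, uv);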
auto *copyInstr =
spvBuilder.createCopyObject(expr->getType(), index, expr->getExprLoc());
copyInstr->setNonUniform();
return copyInstr;
}
SpirvInstruction *
SpirvEmitter::processIntrinsicMsad4(const CallExpr *callExpr) {
const auto loc = callExpr->getExprLoc();
const auto range = callExpr->getSourceRange();
if (!spirvOptions.noWarnEmulatedFeatures)
emitWarning("msad4 intrinsic function is emulated using many SPIR-V "
"instructions due to lack of direct SPIR-V equivalent",
loc);
// Compares a 4-byte reference value and an 8-byte source value and
// accumulates a vector of 4 sums. Each sum corresponds to the masked sum
// of absolute differences of a different byte alignment between the
// reference value and the source value.
// If we have:
// uint v0; // reference
// uint2 v1; // source
// uint4 v2; // accum
// uint4 o0; // result of msad4
// uint4 r0, t0; // temporary values
//
// Then msad4(v0, v1, v2) translates to the following SM5 assembly according
// to fxc:
// Step 1:
// ushr r0.xyz, v1.xxxx, l(8, 16, 24, 0)
// Step 2:
// [result], [ width ], [ offset ], [ insert ], [ base ]
// bfi t0.yzw, l(0, 8, 16, 24), l(0, 24, 16, 8), v1.yyyy , r0.xxyz
// mov t0.x, v1.x
// Step 3:
// msad o0.xyzw, v0.xxxx, t0.xyzw, v2.xyzw
const auto boolType = astContext.BoolTy;
const auto intType = astContext.IntTy;
const auto uintType = astContext.UnsignedIntTy;
const auto uint4Type = astContext.getExtVectorType(uintType, 4);
auto *reference = doExpr(callExpr->getArg(0));
auto *source = doExpr(callExpr->getArg(1));
auto *accum = doExpr(callExpr->getArg(2));
const auto uint0 =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 0));
const auto uint8 =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 8));
const auto uint16 =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 16));
const auto uint24 =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 24));
// Step 1.
auto *v1x = spvBuilder.createCompositeExtract(uintType, source, {0}, loc);
// r0.x = v1xS8 = v1.x shifted by 8 bits
auto *v1xS8 = spvBuilder.createBinaryOp(spv::Op::OpShiftLeftLogical, uintType,
v1x, uint8, loc);
// r0.y = v1xS16 = v1.x shifted by 16 bits
auto *v1xS16 = spvBuilder.createBinaryOp(spv::Op::OpShiftLeftLogical,
uintType, v1x, uint16, loc);
// r0.z = v1xS24 = v1.x shifted by 24 bits
auto *v1xS24 = spvBuilder.createBinaryOp(spv::Op::OpShiftLeftLogical,
uintType, v1x, uint24, loc);
// Step 2.
// Do bfi 3 times. DXIL bfi is equivalent to SPIR-V OpBitFieldInsert.
auto *v1y = spvBuilder.createCompositeExtract(uintType, source, {1}, loc);
// Note that t0.x = v1.x, nothing we need to do for that.
  auto *t0y = spvBuilder.createBitFieldInsert(
      uintType, /*base*/ v1xS8, /*insert*/ v1y,
      /* bitOffset */ 24, /* bitCount */ 8, loc, range);
  auto *t0z = spvBuilder.createBitFieldInsert(
      uintType, /*base*/ v1xS16, /*insert*/ v1y,
      /* bitOffset */ 16, /* bitCount */ 16, loc, range);
  auto *t0w = spvBuilder.createBitFieldInsert(
      uintType, /*base*/ v1xS24, /*insert*/ v1y,
      /* bitOffset */ 8, /* bitCount */ 24, loc, range);
// Step 3. MSAD (Masked Sum of Absolute Differences)
// Now perform MSAD four times.
// Need to mimic this algorithm in SPIR-V!
//
// UINT msad( UINT ref, UINT src, UINT accum )
// {
// for (UINT i = 0; i < 4; i++)
// {
// BYTE refByte, srcByte, absDiff;
//
// refByte = (BYTE)(ref >> (i * 8));
// if (!refByte)
// {
// continue;
// }
//
// srcByte = (BYTE)(src >> (i * 8));
// if (refByte >= srcByte)
// {
// absDiff = refByte - srcByte;
// }
// else
// {
// absDiff = srcByte - refByte;
// }
//
// // The recommended overflow behavior for MSAD is
// // to do a 32-bit saturate. This is not
// // required, however, and wrapping is allowed.
// // So from an application point of view,
// // overflow behavior is undefined.
// if (UINT_MAX - accum < absDiff)
// {
// accum = UINT_MAX;
// break;
// }
// accum += absDiff;
// }
//
// return accum;
// }
auto *accum0 = spvBuilder.createCompositeExtract(uintType, accum, {0}, loc);
auto *accum1 = spvBuilder.createCompositeExtract(uintType, accum, {1}, loc);
auto *accum2 = spvBuilder.createCompositeExtract(uintType, accum, {2}, loc);
auto *accum3 = spvBuilder.createCompositeExtract(uintType, accum, {3}, loc);
const llvm::SmallVector<SpirvInstruction *, 4> sources = {v1x, t0y, t0z, t0w};
llvm::SmallVector<SpirvInstruction *, 4> accums = {accum0, accum1, accum2,
accum3};
llvm::SmallVector<SpirvInstruction *, 4> refBytes;
llvm::SmallVector<SpirvInstruction *, 4> signedRefBytes;
llvm::SmallVector<SpirvInstruction *, 4> isRefByteZero;
for (uint32_t i = 0; i < 4; ++i) {
refBytes.push_back(spvBuilder.createBitFieldExtract(
uintType, reference, /*offset*/ i * 8, /*count*/ 8, loc, range));
signedRefBytes.push_back(spvBuilder.createUnaryOp(
spv::Op::OpBitcast, intType, refBytes.back(), loc));
isRefByteZero.push_back(spvBuilder.createBinaryOp(
spv::Op::OpIEqual, boolType, refBytes.back(), uint0, loc));
}
for (uint32_t msadNum = 0; msadNum < 4; ++msadNum) {
for (uint32_t byteCount = 0; byteCount < 4; ++byteCount) {
// 'count' is always 8 because we are extracting 8 bits out of 32.
auto *srcByte = spvBuilder.createBitFieldExtract(
uintType, sources[msadNum], /*offset*/ 8 * byteCount, /*count*/ 8,
loc, range);
auto *signedSrcByte =
spvBuilder.createUnaryOp(spv::Op::OpBitcast, intType, srcByte, loc);
auto *sub = spvBuilder.createBinaryOp(spv::Op::OpISub, intType,
signedRefBytes[byteCount],
signedSrcByte, loc);
auto *absSub = spvBuilder.createGLSLExtInst(
intType, GLSLstd450::GLSLstd450SAbs, {sub}, loc);
auto *diff = spvBuilder.createSelect(
uintType, isRefByteZero[byteCount], uint0,
spvBuilder.createUnaryOp(spv::Op::OpBitcast, uintType, absSub, loc),
loc);
// As pointed out by the DXIL reference above, it is *not* required to
// saturate the output to UINT_MAX in case of overflow. Wrapping around is
// also allowed. For simplicity, we will wrap around at this point.
accums[msadNum] = spvBuilder.createBinaryOp(spv::Op::OpIAdd, uintType,
accums[msadNum], diff, loc);
}
}
return spvBuilder.createCompositeConstruct(uint4Type, accums, loc);
}
SpirvInstruction *SpirvEmitter::processWaveQuery(const CallExpr *callExpr,
spv::Op opcode) {
// Signatures:
// bool WaveIsFirstLane()
// uint WaveGetLaneCount()
// uint WaveGetLaneIndex()
assert(callExpr->getNumArgs() == 0);
featureManager.requestTargetEnv(SPV_ENV_VULKAN_1_1, "Wave Operation",
callExpr->getExprLoc());
const QualType retType = callExpr->getCallReturnType(astContext);
return spvBuilder.createGroupNonUniformOp(
opcode, retType, spv::Scope::Subgroup, {}, callExpr->getExprLoc());
}
SpirvInstruction *SpirvEmitter::processIsHelperLane(const CallExpr *callExpr,
SourceLocation loc,
SourceRange range) {
assert(callExpr->getNumArgs() == 0);
if (!featureManager.isTargetEnvVulkan1p3OrAbove()) {
    // If IsHelperLane is used when targeting Vulkan 1.2 or earlier, we enable
    // the SPV_EXT_demote_to_helper_invocation extension in order to use the
    // OpIsHelperInvocationEXT instruction.
featureManager.allowExtension("SPV_EXT_demote_to_helper_invocation");
const QualType retType = callExpr->getCallReturnType(astContext);
return spvBuilder.createIsHelperInvocationEXT(retType,
callExpr->getExprLoc());
}
// The SpreadVolatileSemanticsPass legalization pass will decorate the
// load with Volatile.
const QualType retType = callExpr->getCallReturnType(astContext);
auto *var =
declIdMapper.getBuiltinVar(spv::BuiltIn::HelperInvocation, retType, loc);
auto retVal = spvBuilder.createLoad(retType, var, loc, range);
needsLegalization = true;
return retVal;
}
SpirvInstruction *SpirvEmitter::processWaveVote(const CallExpr *callExpr,
spv::Op opcode) {
// Signatures:
// bool WaveActiveAnyTrue( bool expr )
// bool WaveActiveAllTrue( bool expr )
  //  uint4 WaveActiveBallot( bool expr )
assert(callExpr->getNumArgs() == 1);
featureManager.requestTargetEnv(SPV_ENV_VULKAN_1_1, "Wave Operation",
callExpr->getExprLoc());
auto *predicate = doExpr(callExpr->getArg(0));
const QualType retType = callExpr->getCallReturnType(astContext);
return spvBuilder.createGroupNonUniformOp(opcode, retType,
spv::Scope::Subgroup, {predicate},
callExpr->getExprLoc());
}
spv::Op SpirvEmitter::translateWaveOp(hlsl::IntrinsicOp op, QualType type,
SourceLocation srcLoc) {
const bool isSintType = isSintOrVecMatOfSintType(type);
const bool isUintType = isUintOrVecMatOfUintType(type);
const bool isFloatType = isFloatOrVecMatOfFloatType(type);
#define WAVE_OP_CASE_INT(kind, intWaveOp) \
case hlsl::IntrinsicOp::IOP_Wave##kind: { \
if (isSintType || isUintType) { \
return spv::Op::OpGroupNonUniform##intWaveOp; \
} \
} break
#define WAVE_OP_CASE_INT_FLOAT(kind, intWaveOp, floatWaveOp) \
case hlsl::IntrinsicOp::IOP_Wave##kind: { \
if (isSintType || isUintType) { \
return spv::Op::OpGroupNonUniform##intWaveOp; \
} \
if (isFloatType) { \
return spv::Op::OpGroupNonUniform##floatWaveOp; \
} \
} break
#define WAVE_OP_CASE_SINT_UINT_FLOAT(kind, sintWaveOp, uintWaveOp, \
floatWaveOp) \
case hlsl::IntrinsicOp::IOP_Wave##kind: { \
if (isSintType) { \
return spv::Op::OpGroupNonUniform##sintWaveOp; \
} \
if (isUintType) { \
return spv::Op::OpGroupNonUniform##uintWaveOp; \
} \
if (isFloatType) { \
return spv::Op::OpGroupNonUniform##floatWaveOp; \
} \
} break
switch (op) {
WAVE_OP_CASE_INT_FLOAT(ActiveUSum, IAdd, FAdd);
WAVE_OP_CASE_INT_FLOAT(ActiveSum, IAdd, FAdd);
WAVE_OP_CASE_INT_FLOAT(ActiveUProduct, IMul, FMul);
WAVE_OP_CASE_INT_FLOAT(ActiveProduct, IMul, FMul);
WAVE_OP_CASE_INT_FLOAT(PrefixUSum, IAdd, FAdd);
WAVE_OP_CASE_INT_FLOAT(PrefixSum, IAdd, FAdd);
WAVE_OP_CASE_INT_FLOAT(PrefixUProduct, IMul, FMul);
WAVE_OP_CASE_INT_FLOAT(PrefixProduct, IMul, FMul);
WAVE_OP_CASE_INT(ActiveBitAnd, BitwiseAnd);
WAVE_OP_CASE_INT(ActiveBitOr, BitwiseOr);
WAVE_OP_CASE_INT(ActiveBitXor, BitwiseXor);
WAVE_OP_CASE_SINT_UINT_FLOAT(ActiveUMax, SMax, UMax, FMax);
WAVE_OP_CASE_SINT_UINT_FLOAT(ActiveMax, SMax, UMax, FMax);
WAVE_OP_CASE_SINT_UINT_FLOAT(ActiveUMin, SMin, UMin, FMin);
WAVE_OP_CASE_SINT_UINT_FLOAT(ActiveMin, SMin, UMin, FMin);
WAVE_OP_CASE_INT_FLOAT(MultiPrefixUSum, IAdd, FAdd);
WAVE_OP_CASE_INT_FLOAT(MultiPrefixSum, IAdd, FAdd);
WAVE_OP_CASE_INT_FLOAT(MultiPrefixUProduct, IMul, FMul);
WAVE_OP_CASE_INT_FLOAT(MultiPrefixProduct, IMul, FMul);
WAVE_OP_CASE_INT(MultiPrefixBitAnd, BitwiseAnd);
WAVE_OP_CASE_INT(MultiPrefixBitOr, BitwiseOr);
WAVE_OP_CASE_INT(MultiPrefixBitXor, BitwiseXor);
default:
// Only Simple Wave Ops are handled here.
break;
}
#undef WAVE_OP_CASE_INT_FLOAT
#undef WAVE_OP_CASE_INT
#undef WAVE_OP_CASE_SINT_UINT_FLOAT
emitError("translating wave operator '%0' unimplemented", srcLoc)
<< static_cast<uint32_t>(op);
return spv::Op::OpNop;
}
SpirvInstruction *
SpirvEmitter::processWaveCountBits(const CallExpr *callExpr,
spv::GroupOperation groupOp) {
// Signatures:
// uint WaveActiveCountBits(bool bBit)
  //  uint WavePrefixCountBits(bool bBit)
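  //
  // Both are lowered by ballot'ing the predicate across the subgroup and then
  // bit-counting the ballot. Roughly, WaveActiveCountBits(b) becomes:
  //   %ballot = OpGroupNonUniformBallot %v4uint Subgroup %b
  //   %count  = OpGroupNonUniformBallotBitCount %uint Subgroup Reduce %ballot
  // (WavePrefixCountBits uses the ExclusiveScan group operation instead.)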
assert(callExpr->getNumArgs() == 1);
featureManager.requestTargetEnv(SPV_ENV_VULKAN_1_1, "Wave Operation",
callExpr->getExprLoc());
auto *predicate = doExpr(callExpr->getArg(0));
const auto srcLoc = callExpr->getExprLoc();
const QualType u32Type = astContext.UnsignedIntTy;
const QualType v4u32Type = astContext.getExtVectorType(u32Type, 4);
const QualType retType = callExpr->getCallReturnType(astContext);
auto *ballot = spvBuilder.createGroupNonUniformOp(
spv::Op::OpGroupNonUniformBallot, v4u32Type, spv::Scope::Subgroup,
{predicate}, srcLoc);
return spvBuilder.createGroupNonUniformOp(
spv::Op::OpGroupNonUniformBallotBitCount, retType, spv::Scope::Subgroup,
{ballot}, srcLoc, groupOp);
}
SpirvInstruction *SpirvEmitter::processWaveReductionOrPrefix(
const CallExpr *callExpr, spv::Op opcode, spv::GroupOperation groupOp) {
// Signatures:
// bool WaveActiveAllEqual( <type> expr )
// <type> WaveActiveSum( <type> expr )
// <type> WaveActiveProduct( <type> expr )
// <int_type> WaveActiveBitAnd( <int_type> expr )
// <int_type> WaveActiveBitOr( <int_type> expr )
// <int_type> WaveActiveBitXor( <int_type> expr )
// <type> WaveActiveMin( <type> expr)
// <type> WaveActiveMax( <type> expr)
//
// <type> WavePrefixProduct(<type> value)
// <type> WavePrefixSum(<type> value)
//
// <type> WaveMultiPrefixSum( <type> val, uint4 mask )
// <type> WaveMultiPrefixProduct( <type> val, uint4 mask )
// <int_type> WaveMultiPrefixBitAnd( <int_type> val, uint4 mask )
// <int_type> WaveMultiPrefixBitOr( <int_type> val, uint4 mask )
// <int_type> WaveMultiPrefixBitXor( <int_type> val, uint4 mask )
bool isMultiPrefix =
groupOp == spv::GroupOperation::PartitionedExclusiveScanNV;
assert(callExpr->getNumArgs() == (isMultiPrefix ? 2 : 1));
featureManager.requestTargetEnv(SPV_ENV_VULKAN_1_1, "Wave Operation",
callExpr->getExprLoc());
llvm::SmallVector<SpirvInstruction *, 4> operands;
auto *value = doExpr(callExpr->getArg(0));
if (isMultiPrefix) {
SpirvInstruction *mask = doExpr(callExpr->getArg(1));
operands = {value, mask};
} else {
operands = {value};
}
const QualType retType = callExpr->getCallReturnType(astContext);
return spvBuilder.createGroupNonUniformOp(
opcode, retType, spv::Scope::Subgroup, operands, callExpr->getExprLoc(),
llvm::Optional<spv::GroupOperation>(groupOp));
}
SpirvInstruction *SpirvEmitter::processWaveBroadcast(const CallExpr *callExpr) {
// Signatures:
// <type> WaveReadLaneFirst(<type> expr)
// <type> WaveReadLaneAt(<type> expr, uint laneIndex)
const auto numArgs = callExpr->getNumArgs();
const auto srcLoc = callExpr->getExprLoc();
assert(numArgs == 1 || numArgs == 2);
featureManager.requestTargetEnv(SPV_ENV_VULKAN_1_1, "Wave Operation",
callExpr->getExprLoc());
auto *value = doExpr(callExpr->getArg(0));
const QualType retType = callExpr->getCallReturnType(astContext);
if (numArgs == 2)
// WaveReadLaneAt is in fact not a broadcast operation (even though its name
// might incorrectly suggest so). The proper mapping to SPIR-V for
// it is OpGroupNonUniformShuffle, *not* OpGroupNonUniformBroadcast.
return spvBuilder.createGroupNonUniformOp(
spv::Op::OpGroupNonUniformShuffle, retType, spv::Scope::Subgroup,
{value, doExpr(callExpr->getArg(1))}, srcLoc);
else
return spvBuilder.createGroupNonUniformOp(
spv::Op::OpGroupNonUniformBroadcastFirst, retType, spv::Scope::Subgroup,
{value}, srcLoc);
}
SpirvInstruction *
SpirvEmitter::processWaveQuadWideShuffle(const CallExpr *callExpr,
hlsl::IntrinsicOp op) {
// Signatures:
// <type> QuadReadAcrossX(<type> localValue)
// <type> QuadReadAcrossY(<type> localValue)
// <type> QuadReadAcrossDiagonal(<type> localValue)
// <type> QuadReadLaneAt(<type> sourceValue, uint quadLaneID)
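  //
  // The first three map to OpGroupNonUniformQuadSwap with a direction operand
  // of 0 (X), 1 (Y), or 2 (diagonal); QuadReadLaneAt maps to
  // OpGroupNonUniformQuadBroadcast with the quad lane id as its operand.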
assert(callExpr->getNumArgs() == 1 || callExpr->getNumArgs() == 2);
featureManager.requestTargetEnv(SPV_ENV_VULKAN_1_1, "Wave Operation",
callExpr->getExprLoc());
auto *value = doExpr(callExpr->getArg(0));
const auto srcLoc = callExpr->getExprLoc();
const QualType retType = callExpr->getCallReturnType(astContext);
SpirvInstruction *target = nullptr;
spv::Op opcode = spv::Op::OpGroupNonUniformQuadSwap;
switch (op) {
case hlsl::IntrinsicOp::IOP_QuadReadAcrossX:
target =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 0));
break;
case hlsl::IntrinsicOp::IOP_QuadReadAcrossY:
target =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 1));
break;
case hlsl::IntrinsicOp::IOP_QuadReadAcrossDiagonal:
target =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 2));
break;
case hlsl::IntrinsicOp::IOP_QuadReadLaneAt:
target = doExpr(callExpr->getArg(1));
opcode = spv::Op::OpGroupNonUniformQuadBroadcast;
break;
default:
llvm_unreachable("case should not appear here");
}
return spvBuilder.createGroupNonUniformOp(
opcode, retType, spv::Scope::Subgroup, {value, target}, srcLoc);
}
SpirvInstruction *
SpirvEmitter::processWaveActiveAllEqual(const CallExpr *callExpr) {
assert(callExpr->getNumArgs() == 1);
featureManager.requestTargetEnv(SPV_ENV_VULKAN_1_1, "Wave Operation",
callExpr->getExprLoc());
SpirvInstruction *arg = doExpr(callExpr->getArg(0));
const QualType retType = callExpr->getCallReturnType(astContext);
if (isScalarType(retType))
return processWaveActiveAllEqualScalar(arg, callExpr->getExprLoc());
if (isVectorType(retType))
return processWaveActiveAllEqualVector(arg, callExpr->getExprLoc());
assert(isMxNMatrix(retType));
return processWaveActiveAllEqualMatrix(arg, retType, callExpr->getExprLoc());
}
SpirvInstruction *
SpirvEmitter::processWaveActiveAllEqualScalar(SpirvInstruction *arg,
clang::SourceLocation srcLoc) {
return spvBuilder.createGroupNonUniformOp(
spv::Op::OpGroupNonUniformAllEqual, astContext.BoolTy,
spv::Scope::Subgroup, {arg}, srcLoc);
}
SpirvInstruction *
SpirvEmitter::processWaveActiveAllEqualVector(SpirvInstruction *arg,
clang::SourceLocation srcLoc) {
uint32_t vectorSize = 0;
QualType elementType;
isVectorType(arg->getAstResultType(), &elementType, &vectorSize);
  assert(vectorSize >= 2 && "Vector size in SPIR-V must be at least 2");
llvm::SmallVector<SpirvInstruction *, 4> elements;
for (uint32_t i = 0; i < vectorSize; ++i) {
SpirvInstruction *element =
spvBuilder.createCompositeExtract(elementType, arg, {i}, srcLoc);
elements.push_back(processWaveActiveAllEqualScalar(element, srcLoc));
}
QualType booleanVectortype =
astContext.getExtVectorType(astContext.BoolTy, vectorSize);
return spvBuilder.createCompositeConstruct(booleanVectortype, elements,
srcLoc);
}
SpirvInstruction *
SpirvEmitter::processWaveActiveAllEqualMatrix(SpirvInstruction *arg,
QualType booleanMatrixType,
clang::SourceLocation srcLoc) {
uint32_t numberOfRows = 0;
uint32_t numberOfColumns = 0;
QualType elementType;
isMxNMatrix(arg->getAstResultType(), &elementType, &numberOfRows,
&numberOfColumns);
  assert(numberOfRows >= 2 && "Matrix in SPIR-V must have at least 2 rows");
QualType rowType = astContext.getExtVectorType(elementType, numberOfColumns);
llvm::SmallVector<SpirvInstruction *, 4> rows;
for (uint32_t i = 0; i < numberOfRows; ++i) {
SpirvInstruction *row =
spvBuilder.createCompositeExtract(rowType, arg, {i}, srcLoc);
rows.push_back(processWaveActiveAllEqualVector(row, srcLoc));
}
return spvBuilder.createCompositeConstruct(booleanMatrixType, rows, srcLoc);
}
SpirvInstruction *SpirvEmitter::processWaveMatch(const CallExpr *callExpr) {
assert(callExpr->getNumArgs() == 1);
const auto loc = callExpr->getExprLoc();
// The SPV_NV_shader_subgroup_partitioned extension requires SPIR-V 1.3.
featureManager.requestTargetEnv(SPV_ENV_VULKAN_1_1, "Wave Operation", loc);
SpirvInstruction *arg = doExpr(callExpr->getArg(0));
return spvBuilder.createUnaryOp(spv::Op::OpGroupNonUniformPartitionNV,
callExpr->getType(), arg, loc);
}
SpirvInstruction *SpirvEmitter::processIntrinsicModf(const CallExpr *callExpr) {
// Signature is: ret modf(x, ip)
// [in] x: the input floating-point value.
// [out] ip: the integer portion of x.
// [out] ret: the fractional portion of x.
// All of the above must be a scalar, vector, or matrix with the same
// component types. Component types can be float or int.
// The ModfStruct SPIR-V instruction returns a struct. The first member is the
// fractional part and the second member is the integer portion.
// ModfStruct {
// <scalar or vector of float> frac;
// <scalar or vector of float> ip;
// }
  // Note that if the input number (x) is not a float (i.e. 'x' is an int), it
  // is automatically converted to float before modf is invoked. Sadly, the
  // 'ip' argument is not treated the same way. Therefore, in such cases we
  // have to manually convert the float result into int.
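  //
  // For example, modf(2.75, ip) yields 0.75 and sets ip to 2 (converted to
  // int if 'ip' was declared with an integer type).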
const Expr *arg = callExpr->getArg(0);
const Expr *ipArg = callExpr->getArg(1);
const auto loc = callExpr->getLocStart();
const auto range = callExpr->getSourceRange();
const auto argType = arg->getType();
const auto ipType = ipArg->getType();
const auto returnType = callExpr->getType();
auto *argInstr = doExpr(arg);
// For scalar and vector argument types.
{
if (isScalarType(argType) || isVectorType(argType)) {
// The struct members *must* have the same type.
const auto modfStructType = spvContext.getHybridStructType(
{HybridStructType::FieldInfo(argType, "frac"),
HybridStructType::FieldInfo(argType, "ip")},
"ModfStructType");
auto *modf = spvBuilder.createGLSLExtInst(
modfStructType, GLSLstd450::GLSLstd450ModfStruct, {argInstr}, loc,
range);
SpirvInstruction *ip =
spvBuilder.createCompositeExtract(argType, modf, {1}, loc, range);
// This will do nothing if the input number (x) and the ip are both of the
// same type. Otherwise, it will convert the ip into int as necessary.
ip = castToInt(ip, argType, ipType, ipArg->getLocStart(), range);
processAssignment(ipArg, ip, false, nullptr);
return spvBuilder.createCompositeExtract(argType, modf, {0}, loc, range);
}
}
// For matrix argument types.
{
uint32_t rowCount = 0, colCount = 0;
QualType elemType = {};
if (isMxNMatrix(argType, &elemType, &rowCount, &colCount)) {
const auto colType = astContext.getExtVectorType(elemType, colCount);
const auto modfStructType = spvContext.getHybridStructType(
{HybridStructType::FieldInfo(colType, "frac"),
HybridStructType::FieldInfo(colType, "ip")},
"ModfStructType");
llvm::SmallVector<SpirvInstruction *, 4> fracs;
llvm::SmallVector<SpirvInstruction *, 4> ips;
for (uint32_t i = 0; i < rowCount; ++i) {
auto *curRow = spvBuilder.createCompositeExtract(colType, argInstr, {i},
loc, range);
auto *modf = spvBuilder.createGLSLExtInst(
modfStructType, GLSLstd450::GLSLstd450ModfStruct, {curRow}, loc,
range);
ips.push_back(
spvBuilder.createCompositeExtract(colType, modf, {1}, loc, range));
fracs.push_back(
spvBuilder.createCompositeExtract(colType, modf, {0}, loc, range));
}
SpirvInstruction *ip =
spvBuilder.createCompositeConstruct(argType, ips, loc, range);
// If the 'ip' is not a float type, the AST will not contain a CastExpr
// because this is internal to the intrinsic function. So, in such a
// case we need to cast manually.
if (!hlsl::GetHLSLMatElementType(ipType)->isFloatingType())
ip = castToInt(ip, argType, ipType, ipArg->getLocStart(), range);
processAssignment(ipArg, ip, false, nullptr, range);
return spvBuilder.createCompositeConstruct(returnType, fracs, loc, range);
}
}
emitError("invalid argument type passed to Modf intrinsic function",
callExpr->getExprLoc());
return nullptr;
}
SpirvInstruction *SpirvEmitter::processIntrinsicMad(const CallExpr *callExpr) {
// Signature is: ret mad(a,b,c)
// All of the above must be a scalar, vector, or matrix with the same
// component types. Component types can be float or int.
// The return value is equal to "a * b + c"
// In the case of float arguments, we can use the GLSL extended instruction
// set's Fma instruction with NoContraction decoration. In the case of integer
// arguments, we'll have to manually perform an OpIMul followed by an OpIAdd
// (We should also apply NoContraction decoration to these two instructions to
// get precise arithmetic).
// TODO: We currently don't propagate the NoContraction decoration.
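  //
  // For scalar/vector floats, the emitted SPIR-V is roughly:
  //   %fma = OpExtInst %type %glsl_std_450 Fma %a %b %c
  // with %fma decorated with NoContraction.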
const auto loc = callExpr->getLocStart();
const auto range = callExpr->getSourceRange();
const Expr *arg0 = callExpr->getArg(0);
const Expr *arg1 = callExpr->getArg(1);
const Expr *arg2 = callExpr->getArg(2);
// All arguments and the return type are the same.
const auto argType = arg0->getType();
auto *arg0Instr = doExpr(arg0);
auto *arg1Instr = doExpr(arg1);
auto *arg2Instr = doExpr(arg2);
auto arg0Loc = arg0->getLocStart();
auto arg1Loc = arg1->getLocStart();
auto arg2Loc = arg2->getLocStart();
// For floating point arguments, we can use the extended instruction set's Fma
// instruction. Sadly we can't simply call processIntrinsicUsingGLSLInst
// because we need to specifically decorate the Fma instruction with
// NoContraction decoration.
if (isFloatOrVecMatOfFloatType(argType)) {
// For matrix cases, operate on each row of the matrix.
if (isMxNMatrix(arg0->getType())) {
const auto actOnEachVec = [this, loc, arg1Instr, arg2Instr, arg1Loc,
arg2Loc,
range](uint32_t index, QualType inType,
QualType outType,
SpirvInstruction *arg0Row) {
auto *arg1Row = spvBuilder.createCompositeExtract(
inType, arg1Instr, {index}, arg1Loc, range);
auto *arg2Row = spvBuilder.createCompositeExtract(
inType, arg2Instr, {index}, arg2Loc, range);
auto *fma = spvBuilder.createGLSLExtInst(
outType, GLSLstd450Fma, {arg0Row, arg1Row, arg2Row}, loc, range);
spvBuilder.decorateNoContraction(fma, loc);
return fma;
};
return processEachVectorInMatrix(arg0, arg0Instr, actOnEachVec, loc,
range);
}
// Non-matrix cases
auto *fma = spvBuilder.createGLSLExtInst(
argType, GLSLstd450Fma, {arg0Instr, arg1Instr, arg2Instr}, loc, range);
spvBuilder.decorateNoContraction(fma, loc);
return fma;
}
// For scalar and vector argument types.
{
if (isScalarType(argType) || isVectorType(argType)) {
auto *mul = spvBuilder.createBinaryOp(spv::Op::OpIMul, argType, arg0Instr,
arg1Instr, loc, range);
auto *add = spvBuilder.createBinaryOp(spv::Op::OpIAdd, argType, mul,
arg2Instr, loc, range);
spvBuilder.decorateNoContraction(mul, loc);
spvBuilder.decorateNoContraction(add, loc);
return add;
}
}
// For matrix argument types.
{
uint32_t rowCount = 0, colCount = 0;
QualType elemType = {};
if (isMxNMatrix(argType, &elemType, &rowCount, &colCount)) {
const auto colType = astContext.getExtVectorType(elemType, colCount);
llvm::SmallVector<SpirvInstruction *, 4> resultRows;
for (uint32_t i = 0; i < rowCount; ++i) {
auto *rowArg0 = spvBuilder.createCompositeExtract(colType, arg0Instr,
{i}, arg0Loc, range);
auto *rowArg1 = spvBuilder.createCompositeExtract(colType, arg1Instr,
{i}, arg1Loc, range);
auto *rowArg2 = spvBuilder.createCompositeExtract(colType, arg2Instr,
{i}, arg2Loc, range);
auto *mul = spvBuilder.createBinaryOp(spv::Op::OpIMul, colType, rowArg0,
rowArg1, loc, range);
auto *add = spvBuilder.createBinaryOp(spv::Op::OpIAdd, colType, mul,
rowArg2, loc, range);
spvBuilder.decorateNoContraction(mul, loc);
spvBuilder.decorateNoContraction(add, loc);
resultRows.push_back(add);
}
return spvBuilder.createCompositeConstruct(argType, resultRows, loc,
range);
}
}
emitError("invalid argument type passed to mad intrinsic function",
callExpr->getExprLoc());
return 0;
}
SpirvInstruction *SpirvEmitter::processIntrinsicLit(const CallExpr *callExpr) {
// Signature is: float4 lit(float n_dot_l, float n_dot_h, float m)
//
// This function returns a lighting coefficient vector
// (ambient, diffuse, specular, 1) where:
// ambient = 1.
// diffuse = (n_dot_l < 0) ? 0 : n_dot_l
// specular = (n_dot_l < 0 || n_dot_h < 0) ? 0 : ((n_dot_h) * m)
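  //
  // For example, with this emulation lit(0.5, 0.3, 4.0) yields
  // (1.0, 0.5, 1.2, 1.0).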
auto *nDotL = doExpr(callExpr->getArg(0));
auto *nDotH = doExpr(callExpr->getArg(1));
auto *m = doExpr(callExpr->getArg(2));
const auto loc = callExpr->getExprLoc();
const auto range = callExpr->getSourceRange();
const QualType floatType = astContext.FloatTy;
const QualType boolType = astContext.BoolTy;
SpirvInstruction *floatZero =
spvBuilder.getConstantFloat(astContext.FloatTy, llvm::APFloat(0.0f));
SpirvInstruction *floatOne =
spvBuilder.getConstantFloat(astContext.FloatTy, llvm::APFloat(1.0f));
const QualType retType = callExpr->getType();
auto *diffuse = spvBuilder.createGLSLExtInst(
floatType, GLSLstd450::GLSLstd450FMax, {floatZero, nDotL}, loc, range);
auto *min = spvBuilder.createGLSLExtInst(
floatType, GLSLstd450::GLSLstd450FMin, {nDotL, nDotH}, loc, range);
auto *isNeg = spvBuilder.createBinaryOp(spv::Op::OpFOrdLessThan, boolType,
min, floatZero, loc, range);
auto *mul = spvBuilder.createBinaryOp(spv::Op::OpFMul, floatType, nDotH, m,
loc, range);
auto *specular =
spvBuilder.createSelect(floatType, isNeg, floatZero, mul, loc, range);
return spvBuilder.createCompositeConstruct(
retType, {floatOne, diffuse, specular, floatOne}, callExpr->getLocEnd(),
range);
}
SpirvInstruction *
SpirvEmitter::processIntrinsicFrexp(const CallExpr *callExpr) {
// Signature is: ret frexp(x, exp)
// [in] x: the input floating-point value.
// [out] exp: the calculated exponent.
// [out] ret: the calculated mantissa.
// All of the above must be a scalar, vector, or matrix of *float* type.
// The FrexpStruct SPIR-V instruction returns a struct. The first
// member is the significand (mantissa) and must be of the same type as the
// input parameter, and the second member is the exponent and must always be a
// scalar or vector of 32-bit *integer* type.
// FrexpStruct {
// <scalar or vector of int/float> mantissa;
// <scalar or vector of integers> exponent;
// }
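  //
  // For example, frexp(8.0, e) returns a mantissa of 0.5 and sets e to 4.0,
  // since 0.5 * 2^4 == 8.0.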
const Expr *arg = callExpr->getArg(0);
const auto argType = arg->getType();
const auto returnType = callExpr->getType();
const auto loc = callExpr->getExprLoc();
const auto range = callExpr->getSourceRange();
auto *argInstr = doExpr(arg);
auto *expInstr = doExpr(callExpr->getArg(1));
// For scalar and vector argument types.
{
uint32_t elemCount = 1;
if (isScalarType(argType) || isVectorType(argType, nullptr, &elemCount)) {
const QualType expType =
elemCount == 1
? astContext.IntTy
: astContext.getExtVectorType(astContext.IntTy, elemCount);
const auto *frexpStructType = spvContext.getHybridStructType(
{HybridStructType::FieldInfo(argType, "mantissa"),
HybridStructType::FieldInfo(expType, "exponent")},
"FrexpStructType");
auto *frexp = spvBuilder.createGLSLExtInst(
frexpStructType, GLSLstd450::GLSLstd450FrexpStruct, {argInstr}, loc,
range);
auto *exponentInt =
spvBuilder.createCompositeExtract(expType, frexp, {1}, loc, range);
// Since the SPIR-V instruction returns an int, and the intrinsic HLSL
      // expects a float, a conversion must take place before writing the
// results.
auto *exponentFloat = spvBuilder.createUnaryOp(
spv::Op::OpConvertSToF, returnType, exponentInt, loc, range);
spvBuilder.createStore(expInstr, exponentFloat, loc, range);
return spvBuilder.createCompositeExtract(argType, frexp, {0}, loc, range);
}
}
// For matrix argument types.
{
uint32_t rowCount = 0, colCount = 0;
if (isMxNMatrix(argType, nullptr, &rowCount, &colCount)) {
const auto expType =
astContext.getExtVectorType(astContext.IntTy, colCount);
const auto colType =
astContext.getExtVectorType(astContext.FloatTy, colCount);
const auto *frexpStructType = spvContext.getHybridStructType(
{HybridStructType::FieldInfo(colType, "mantissa"),
HybridStructType::FieldInfo(expType, "exponent")},
"FrexpStructType");
llvm::SmallVector<SpirvInstruction *, 4> exponents;
llvm::SmallVector<SpirvInstruction *, 4> mantissas;
for (uint32_t i = 0; i < rowCount; ++i) {
auto *curRow = spvBuilder.createCompositeExtract(colType, argInstr, {i},
arg->getLocStart());
auto *frexp = spvBuilder.createGLSLExtInst(
frexpStructType, GLSLstd450::GLSLstd450FrexpStruct, {curRow}, loc,
range);
auto *exponentInt =
spvBuilder.createCompositeExtract(expType, frexp, {1}, loc, range);
// Since the SPIR-V instruction returns an int, and the intrinsic HLSL
        // expects a float, a conversion must take place before writing the
// results.
auto *exponentFloat = spvBuilder.createUnaryOp(
spv::Op::OpConvertSToF, colType, exponentInt, loc, range);
exponents.push_back(exponentFloat);
mantissas.push_back(
spvBuilder.createCompositeExtract(colType, frexp, {0}, loc, range));
}
auto *exponentsResult = spvBuilder.createCompositeConstruct(
returnType, exponents, loc, range);
spvBuilder.createStore(expInstr, exponentsResult, loc, range);
return spvBuilder.createCompositeConstruct(returnType, mantissas,
callExpr->getLocEnd(), range);
}
}
emitError("invalid argument type passed to Frexp intrinsic function",
callExpr->getExprLoc());
return nullptr;
}
SpirvInstruction *
SpirvEmitter::processIntrinsicLdexp(const CallExpr *callExpr) {
// Signature: ret ldexp(x, exp)
// This function uses the following formula: x * 2^exp.
  // Note that we cannot use the GLSL extended instruction Ldexp since it
  // requires the exponent to be an integer (vector), but HLSL takes a float
  // (vector) exponent. So we must calculate the result manually.
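  //
  // For example, ldexp(1.5, 2.0) computes 1.5 * exp2(2.0) = 6.0.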
const Expr *x = callExpr->getArg(0);
const auto paramType = x->getType();
auto *xInstr = doExpr(x);
auto *expInstr = doExpr(callExpr->getArg(1));
const auto loc = callExpr->getLocStart();
const auto arg1Loc = callExpr->getArg(1)->getLocStart();
const auto range = callExpr->getSourceRange();
// For scalar and vector argument types.
if (isScalarType(paramType) || isVectorType(paramType)) {
const auto twoExp = spvBuilder.createGLSLExtInst(
paramType, GLSLstd450::GLSLstd450Exp2, {expInstr}, loc, range);
return spvBuilder.createBinaryOp(spv::Op::OpFMul, paramType, xInstr, twoExp,
loc, range);
}
// For matrix argument types.
{
uint32_t rowCount = 0, colCount = 0;
if (isMxNMatrix(paramType, nullptr, &rowCount, &colCount)) {
const auto actOnEachVec = [this, loc, expInstr, arg1Loc,
range](uint32_t index, QualType inType,
QualType outType,
SpirvInstruction *xRowInstr) {
auto *expRowInstr = spvBuilder.createCompositeExtract(
inType, expInstr, {index}, arg1Loc, range);
auto *twoExp = spvBuilder.createGLSLExtInst(
outType, GLSLstd450::GLSLstd450Exp2, {expRowInstr}, loc, range);
return spvBuilder.createBinaryOp(spv::Op::OpFMul, outType, xRowInstr,
twoExp, loc, range);
};
return processEachVectorInMatrix(x, xInstr, actOnEachVec, loc, range);
}
}
emitError("invalid argument type passed to ldexp intrinsic function",
callExpr->getExprLoc());
return nullptr;
}
SpirvInstruction *SpirvEmitter::processIntrinsicDst(const CallExpr *callExpr) {
// Signature is float4 dst(float4 src0, float4 src1)
// result.x = 1;
// result.y = src0.y * src1.y;
// result.z = src0.z;
// result.w = src1.w;
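  //
  // For example, dst(float4(a, 2, 3, b), float4(c, 4, d, 5)) yields
  // float4(1, 8, 3, 5).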
const QualType f32 = astContext.FloatTy;
auto *arg0Id = doExpr(callExpr->getArg(0));
auto *arg1Id = doExpr(callExpr->getArg(1));
auto arg0Loc = callExpr->getArg(0)->getLocStart();
auto arg1Loc = callExpr->getArg(1)->getLocStart();
const auto range = callExpr->getSourceRange();
auto *arg0y =
spvBuilder.createCompositeExtract(f32, arg0Id, {1}, arg0Loc, range);
auto *arg1y =
spvBuilder.createCompositeExtract(f32, arg1Id, {1}, arg1Loc, range);
auto *arg0z =
spvBuilder.createCompositeExtract(f32, arg0Id, {2}, arg0Loc, range);
auto *arg1w =
spvBuilder.createCompositeExtract(f32, arg1Id, {3}, arg1Loc, range);
auto loc = callExpr->getLocEnd();
auto *arg0yMularg1y =
spvBuilder.createBinaryOp(spv::Op::OpFMul, f32, arg0y, arg1y, loc, range);
return spvBuilder.createCompositeConstruct(
callExpr->getType(),
{spvBuilder.getConstantFloat(astContext.FloatTy, llvm::APFloat(1.0f)),
arg0yMularg1y, arg0z, arg1w},
loc, range);
}
SpirvInstruction *SpirvEmitter::processIntrinsicClip(const CallExpr *callExpr) {
// Discards the current pixel if the specified value is less than zero.
// TODO: If the argument can be const folded and evaluated, we could
// potentially avoid creating a branch. This would be a bit challenging for
// matrix/vector arguments.
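  //
  // For example, clip(x - 0.5) discards the current pixel whenever x < 0.5.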
assert(callExpr->getNumArgs() == 1u);
const Expr *arg = callExpr->getArg(0);
const auto loc = callExpr->getExprLoc();
const auto range = callExpr->getSourceRange();
const auto argType = arg->getType();
const auto boolType = astContext.BoolTy;
SpirvInstruction *condition = nullptr;
  // We branch based on the argument at runtime. If the argument is a
  // vector/matrix, clipping is done if *any* element of the vector/matrix is
  // less than zero.
auto *argInstr = doExpr(arg);
QualType elemType = {};
uint32_t elemCount = 0, rowCount = 0, colCount = 0;
if (isScalarType(argType)) {
auto *zero = getValueZero(argType);
condition = spvBuilder.createBinaryOp(spv::Op::OpFOrdLessThan, boolType,
argInstr, zero, loc, range);
} else if (isVectorType(argType, nullptr, &elemCount)) {
auto *zero = getValueZero(argType);
const QualType boolVecType =
astContext.getExtVectorType(boolType, elemCount);
auto *cmp = spvBuilder.createBinaryOp(spv::Op::OpFOrdLessThan, boolVecType,
argInstr, zero, loc, range);
condition =
spvBuilder.createUnaryOp(spv::Op::OpAny, boolType, cmp, loc, range);
} else if (isMxNMatrix(argType, &elemType, &rowCount, &colCount)) {
const auto floatVecType = astContext.getExtVectorType(elemType, colCount);
auto *elemZero = getValueZero(elemType);
llvm::SmallVector<SpirvConstant *, 4> elements(size_t(colCount), elemZero);
auto *zero = spvBuilder.getConstantComposite(floatVecType, elements);
llvm::SmallVector<SpirvInstruction *, 4> cmpResults;
for (uint32_t i = 0; i < rowCount; ++i) {
auto *lhsVec = spvBuilder.createCompositeExtract(floatVecType, argInstr,
{i}, loc, range);
const auto boolColType = astContext.getExtVectorType(boolType, colCount);
auto *cmp = spvBuilder.createBinaryOp(
spv::Op::OpFOrdLessThan, boolColType, lhsVec, zero, loc, range);
auto *any =
spvBuilder.createUnaryOp(spv::Op::OpAny, boolType, cmp, loc, range);
cmpResults.push_back(any);
}
const auto boolRowType = astContext.getExtVectorType(boolType, rowCount);
auto *results = spvBuilder.createCompositeConstruct(boolRowType, cmpResults,
loc, range);
condition =
spvBuilder.createUnaryOp(spv::Op::OpAny, boolType, results, loc, range);
} else {
emitError("invalid argument type passed to clip intrinsic function", loc);
return nullptr;
}
// Then we need to emit the instruction for the conditional branch.
auto *thenBB = spvBuilder.createBasicBlock("if.true");
auto *mergeBB = spvBuilder.createBasicBlock("if.merge");
// Create the branch instruction. This will end the current basic block.
spvBuilder.createConditionalBranch(condition, thenBB, mergeBB, loc, mergeBB,
nullptr,
spv::SelectionControlMask::MaskNone,
spv::LoopControlMask::MaskNone, range);
spvBuilder.addSuccessor(thenBB);
spvBuilder.addSuccessor(mergeBB);
spvBuilder.setMergeTarget(mergeBB);
// Handle the then branch
spvBuilder.setInsertPoint(thenBB);
if (featureManager.isExtensionEnabled(
Extension::EXT_demote_to_helper_invocation) ||
featureManager.isTargetEnvVulkan1p3OrAbove()) {
    // OpDemoteToHelperInvocation(EXT), provided by SPIR-V 1.6 or the
    // SPV_EXT_demote_to_helper_invocation extension, allows shaders to
// "demote" a fragment shader invocation to behave like a helper invocation
// for its duration. The demoted invocation will have no further side
// effects and will not output to the framebuffer, but remains active and
// can participate in computing derivatives and in subgroup operations. This
// is a better match for the "clip" instruction in HLSL.
spvBuilder.createDemoteToHelperInvocation(loc);
spvBuilder.createBranch(mergeBB, loc, nullptr, nullptr,
spv::LoopControlMask::MaskNone, range);
} else {
spvBuilder.createKill(loc, range);
}
spvBuilder.addSuccessor(mergeBB);
// From now on, we'll emit instructions into the merge block.
spvBuilder.setInsertPoint(mergeBB);
return nullptr;
}
SpirvInstruction *
SpirvEmitter::processIntrinsicClamp(const CallExpr *callExpr) {
  // According to the HLSL reference, clamp(X, Min, Max) takes 3 arguments;
  // each may be int, uint, or float.
const QualType returnType = callExpr->getType();
GLSLstd450 glslOpcode = GLSLstd450::GLSLstd450UClamp;
if (isFloatOrVecMatOfFloatType(returnType))
glslOpcode = GLSLstd450::GLSLstd450FClamp;
else if (isSintOrVecMatOfSintType(returnType))
glslOpcode = GLSLstd450::GLSLstd450SClamp;
// Get the function parameters. Expect 3 parameters.
assert(callExpr->getNumArgs() == 3u);
const Expr *argX = callExpr->getArg(0);
const Expr *argMin = callExpr->getArg(1);
const Expr *argMax = callExpr->getArg(2);
const auto loc = callExpr->getExprLoc();
const auto range = callExpr->getSourceRange();
auto *argXInstr = doExpr(argX);
auto *argMinInstr = doExpr(argMin);
auto *argMaxInstr = doExpr(argMax);
const auto argMinLoc = argMin->getLocStart();
const auto argMaxLoc = argMax->getLocStart();
// FClamp, UClamp, and SClamp do not operate on matrices, so we should perform
// the operation on each vector of the matrix.
if (isMxNMatrix(argX->getType())) {
const auto actOnEachVec = [this, loc, range, glslOpcode, argMinInstr,
argMaxInstr, argMinLoc, argMaxLoc](
uint32_t index, QualType inType,
QualType outType, SpirvInstruction *curRow) {
auto *minRowInstr = spvBuilder.createCompositeExtract(
inType, argMinInstr, {index}, argMinLoc, range);
auto *maxRowInstr = spvBuilder.createCompositeExtract(
inType, argMaxInstr, {index}, argMaxLoc, range);
return spvBuilder.createGLSLExtInst(
outType, glslOpcode, {curRow, minRowInstr, maxRowInstr}, loc, range);
};
return processEachVectorInMatrix(argX, argXInstr, actOnEachVec, loc, range);
}
return spvBuilder.createGLSLExtInst(returnType, glslOpcode,
{argXInstr, argMinInstr, argMaxInstr},
loc, range);
}
SpirvInstruction *
SpirvEmitter::processIntrinsicMemoryBarrier(const CallExpr *callExpr,
bool isDevice, bool groupSync,
bool isAllBarrier) {
// * DeviceMemoryBarrier =
// OpMemoryBarrier (memScope=Device,
// sem=Image|Uniform|AcquireRelease)
//
// * DeviceMemoryBarrierWithGroupSync =
// OpControlBarrier(execScope = Workgroup,
// memScope=Device,
// sem=Image|Uniform|AcquireRelease)
const spv::MemorySemanticsMask deviceMemoryBarrierSema =
spv::MemorySemanticsMask::ImageMemory |
spv::MemorySemanticsMask::UniformMemory |
spv::MemorySemanticsMask::AcquireRelease;
// * GroupMemoryBarrier =
// OpMemoryBarrier (memScope=Workgroup,
// sem = Workgroup|AcquireRelease)
//
// * GroupMemoryBarrierWithGroupSync =
// OpControlBarrier (execScope = Workgroup,
// memScope = Workgroup,
// sem = Workgroup|AcquireRelease)
const spv::MemorySemanticsMask groupMemoryBarrierSema =
spv::MemorySemanticsMask::WorkgroupMemory |
spv::MemorySemanticsMask::AcquireRelease;
// * AllMemoryBarrier =
// OpMemoryBarrier(memScope = Device,
// sem = Image|Uniform|Workgroup|AcquireRelease)
//
// * AllMemoryBarrierWithGroupSync =
// OpControlBarrier(execScope = Workgroup,
// memScope = Device,
// sem = Image|Uniform|Workgroup|AcquireRelease)
const spv::MemorySemanticsMask allMemoryBarrierSema =
spv::MemorySemanticsMask::ImageMemory |
spv::MemorySemanticsMask::UniformMemory |
spv::MemorySemanticsMask::WorkgroupMemory |
spv::MemorySemanticsMask::AcquireRelease;
// Get <result-id> for execution scope.
// If present, execution scope is always Workgroup!
llvm::Optional<spv::Scope> execScope = llvm::None;
if (groupSync) {
execScope = spv::Scope::Workgroup;
}
// Get <result-id> for memory scope
const spv::Scope memScope =
(isDevice || isAllBarrier) ? spv::Scope::Device : spv::Scope::Workgroup;
// Get <result-id> for memory semantics
const auto memSemaMask = isAllBarrier ? allMemoryBarrierSema
: isDevice ? deviceMemoryBarrierSema
: groupMemoryBarrierSema;
spvBuilder.createBarrier(memScope, memSemaMask, execScope,
callExpr->getExprLoc(), callExpr->getSourceRange());
return nullptr;
}
SpirvInstruction *SpirvEmitter::processNonFpMatrixTranspose(
QualType matType, SpirvInstruction *matrix, SourceLocation loc,
SourceRange range) {
  // The simplest way is to flatten the matrix and construct a new matrix from
  // the flattened elements (e.g., for a mat4x4).
QualType elemType = {};
uint32_t numRows = 0, numCols = 0;
const bool isMat = isMxNMatrix(matType, &elemType, &numRows, &numCols);
assert(isMat && !elemType->isFloatingType());
(void)isMat;
const auto colQualType = astContext.getExtVectorType(elemType, numRows);
  // You cannot perform a composite construct of an array using a few vectors.
  // The number of constituents passed to OpCompositeConstruct must be equal to
  // the number of array elements.
llvm::SmallVector<SpirvInstruction *, 4> elems;
for (uint32_t i = 0; i < numRows; ++i)
for (uint32_t j = 0; j < numCols; ++j)
elems.push_back(spvBuilder.createCompositeExtract(elemType, matrix,
{i, j}, loc, range));
llvm::SmallVector<SpirvInstruction *, 4> cols;
for (uint32_t i = 0; i < numCols; ++i) {
    // The elements in the ith vector of the "transposed" array are at offsets
    // i, i + numCols, i + 2 * numCols, ...
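    // For example (illustrative): a 2x3 matrix flattened row-major gives
    // elems = {m00, m01, m02, m10, m11, m12}; column 0 of the transpose
    // gathers elems[0] and elems[3] = {m00, m10}, column 1 gathers
    // {m01, m11}, and column 2 gathers {m02, m12}.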
llvm::SmallVector<SpirvInstruction *, 4> indexes;
for (uint32_t j = 0; j < numRows; ++j)
indexes.push_back(elems[i + (j * numCols)]);
cols.push_back(
spvBuilder.createCompositeConstruct(colQualType, indexes, loc, range));
}
auto transposeType = astContext.getConstantArrayType(
colQualType, llvm::APInt(32, numCols), clang::ArrayType::Normal, 0);
return spvBuilder.createCompositeConstruct(transposeType, cols, loc, range);
}
SpirvInstruction *SpirvEmitter::processNonFpDot(
SpirvInstruction *vec1Id, SpirvInstruction *vec2Id, uint32_t vecSize,
QualType elemType, SourceLocation loc, SourceRange range) {
llvm::SmallVector<SpirvInstruction *, 4> muls;
for (uint32_t i = 0; i < vecSize; ++i) {
auto *elem1 =
spvBuilder.createCompositeExtract(elemType, vec1Id, {i}, loc, range);
auto *elem2 =
spvBuilder.createCompositeExtract(elemType, vec2Id, {i}, loc, range);
muls.push_back(spvBuilder.createBinaryOp(
translateOp(BO_Mul, elemType), elemType, elem1, elem2, loc, range));
}
SpirvInstruction *sum = muls[0];
for (uint32_t i = 1; i < vecSize; ++i) {
sum = spvBuilder.createBinaryOp(translateOp(BO_Add, elemType), elemType,
sum, muls[i], loc, range);
}
return sum;
}
SpirvInstruction *SpirvEmitter::processNonFpScalarTimesMatrix(
QualType scalarType, SpirvInstruction *scalar, QualType matrixType,
SpirvInstruction *matrix, SourceLocation loc, SourceRange range) {
assert(isScalarType(scalarType));
QualType elemType = {};
uint32_t numRows = 0, numCols = 0;
const bool isMat = isMxNMatrix(matrixType, &elemType, &numRows, &numCols);
assert(isMat);
assert(isSameType(astContext, scalarType, elemType));
(void)isMat;
// We need to multiply the scalar by each vector of the matrix.
// The front-end guarantees that the scalar and matrix element type are
// the same. For example, if the scalar is a float, the matrix is casted
// to a float matrix before being passed to mul(). It is also guaranteed
// that types such as bool are casted to float or int before being
// passed to mul().
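  // For example (illustrative): mul(s, M) for an int 2x2 matrix M splats s
  // into {s, s}, multiplies each extracted row of M component-wise against
  // the splat, and reassembles the rows into the result matrix.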
const auto rowType = astContext.getExtVectorType(elemType, numCols);
llvm::SmallVector<SpirvInstruction *, 4> splat(size_t(numCols), scalar);
auto *scalarSplat =
spvBuilder.createCompositeConstruct(rowType, splat, loc, range);
llvm::SmallVector<SpirvInstruction *, 4> mulRows;
for (uint32_t row = 0; row < numRows; ++row) {
auto *rowInstr =
spvBuilder.createCompositeExtract(rowType, matrix, {row}, loc, range);
mulRows.push_back(spvBuilder.createBinaryOp(translateOp(BO_Mul, scalarType),
rowType, rowInstr, scalarSplat,
loc, range));
}
return spvBuilder.createCompositeConstruct(matrixType, mulRows, loc, range);
}
SpirvInstruction *SpirvEmitter::processNonFpVectorTimesMatrix(
QualType vecType, SpirvInstruction *vector, QualType matType,
SpirvInstruction *matrix, SourceLocation loc,
SpirvInstruction *matrixTranspose, SourceRange range) {
  // This function assumes that the vector element type and matrix element type
  // are the same.
QualType vecElemType = {}, matElemType = {};
uint32_t vecSize = 0, numRows = 0, numCols = 0;
const bool isVec = isVectorType(vecType, &vecElemType, &vecSize);
const bool isMat = isMxNMatrix(matType, &matElemType, &numRows, &numCols);
assert(isSameType(astContext, vecElemType, matElemType));
assert(isVec);
assert(isMat);
assert(vecSize == numRows);
(void)isVec;
(void)isMat;
  // When processing vector times matrix, the vector is a row vector, and it
  // should be multiplied by the matrix *columns*. The most efficient way to
  // handle this in SPIR-V is to first transpose the matrix, and then extract
  // each column with OpCompositeExtract.
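  // For example (illustrative): for a 3-element row vector v and a 3x2
  // matrix M, result[c] = dot(v, column c of M) for c in {0, 1}, yielding a
  // 2-element vector.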
if (!matrixTranspose)
matrixTranspose = processNonFpMatrixTranspose(matType, matrix, loc, range);
llvm::SmallVector<SpirvInstruction *, 4> resultElems;
for (uint32_t col = 0; col < numCols; ++col) {
auto *colInstr = spvBuilder.createCompositeExtract(vecType, matrixTranspose,
{col}, loc, range);
resultElems.push_back(
processNonFpDot(vector, colInstr, vecSize, vecElemType, loc, range));
}
return spvBuilder.createCompositeConstruct(
astContext.getExtVectorType(vecElemType, numCols), resultElems, loc,
range);
}
SpirvInstruction *SpirvEmitter::processNonFpMatrixTimesVector(
QualType matType, SpirvInstruction *matrix, QualType vecType,
SpirvInstruction *vector, SourceLocation loc, SourceRange range) {
  // This function assumes that the vector element type and matrix element type
  // are the same.
QualType vecElemType = {}, matElemType = {};
uint32_t vecSize = 0, numRows = 0, numCols = 0;
const bool isVec = isVectorType(vecType, &vecElemType, &vecSize);
const bool isMat = isMxNMatrix(matType, &matElemType, &numRows, &numCols);
assert(isSameType(astContext, vecElemType, matElemType));
assert(isVec);
assert(isMat);
assert(vecSize == numCols);
(void)isVec;
(void)isMat;
// When processing matrix times vector, the vector is a column vector. So we
// simply get each row of the matrix and perform a dot product with the
// vector.
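  // For example (illustrative): for a 2x3 matrix M and a 3-element column
  // vector v, result[r] = dot(row r of M, v) for r in {0, 1}.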
llvm::SmallVector<SpirvInstruction *, 4> resultElems;
for (uint32_t row = 0; row < numRows; ++row) {
auto *rowInstr =
spvBuilder.createCompositeExtract(vecType, matrix, {row}, loc, range);
resultElems.push_back(
processNonFpDot(rowInstr, vector, vecSize, vecElemType, loc, range));
}
return spvBuilder.createCompositeConstruct(
astContext.getExtVectorType(vecElemType, numRows), resultElems, loc,
range);
}
SpirvInstruction *SpirvEmitter::processNonFpMatrixTimesMatrix(
QualType lhsType, SpirvInstruction *lhs, QualType rhsType,
SpirvInstruction *rhs, SourceLocation loc, SourceRange range) {
  // This function assumes that the two matrices have the same element type.
QualType lhsElemType = {}, rhsElemType = {};
uint32_t lhsNumRows = 0, lhsNumCols = 0;
uint32_t rhsNumRows = 0, rhsNumCols = 0;
const bool lhsIsMat =
isMxNMatrix(lhsType, &lhsElemType, &lhsNumRows, &lhsNumCols);
const bool rhsIsMat =
isMxNMatrix(rhsType, &rhsElemType, &rhsNumRows, &rhsNumCols);
assert(isSameType(astContext, lhsElemType, rhsElemType));
assert(lhsIsMat && rhsIsMat);
assert(lhsNumCols == rhsNumRows);
(void)rhsIsMat;
(void)lhsIsMat;
auto *rhsTranspose = processNonFpMatrixTranspose(rhsType, rhs, loc, range);
const auto vecType = astContext.getExtVectorType(lhsElemType, lhsNumCols);
llvm::SmallVector<SpirvInstruction *, 4> resultRows;
for (uint32_t row = 0; row < lhsNumRows; ++row) {
auto *rowInstr =
spvBuilder.createCompositeExtract(vecType, lhs, {row}, loc, range);
resultRows.push_back(processNonFpVectorTimesMatrix(
vecType, rowInstr, rhsType, rhs, loc, rhsTranspose, range));
}
// The resulting matrix will have 'lhsNumRows' rows and 'rhsNumCols' columns.
const auto resultColType =
astContext.getExtVectorType(lhsElemType, rhsNumCols);
const auto resultType = astContext.getConstantArrayType(
resultColType, llvm::APInt(32, lhsNumRows), clang::ArrayType::Normal, 0);
return spvBuilder.createCompositeConstruct(resultType, resultRows, loc,
range);
}
SpirvInstruction *SpirvEmitter::processIntrinsicMul(const CallExpr *callExpr) {
const QualType returnType = callExpr->getType();
// Get the function parameters. Expect 2 parameters.
assert(callExpr->getNumArgs() == 2u);
const Expr *arg0 = callExpr->getArg(0);
const Expr *arg1 = callExpr->getArg(1);
const QualType arg0Type = arg0->getType();
const QualType arg1Type = arg1->getType();
auto loc = callExpr->getExprLoc();
auto range = callExpr->getSourceRange();
// The HLSL mul() function takes 2 arguments. Each argument may be a scalar,
// vector, or matrix. The frontend ensures that the two arguments have the
// same component type. The only allowed component types are int and float.
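  // Informal dispatch summary (the cases below give the exact rules):
  //   scalar * vector  -> OpVectorTimesScalar (float) or splat + OpIMul (int)
  //   vector * vector  -> dot product (scalar result) or outer product
  //   scalar * scalar  -> ordinary multiplication
  //   scalar * matrix  -> OpMatrixTimesScalar (float) or per-row multiply
  //   vector * matrix  -> OpMatrixTimesVector (float) or per-column dots
  //   matrix * vector  -> OpVectorTimesMatrix (float) or per-row dots
  //   matrix * matrix  -> OpMatrixTimesMatrix (float) or row-by-column dots
  // For the float matrix cases the SPIR-V opcode receives the operands in
  // swapped order (see below), reflecting how HLSL matrices map onto SPIR-V
  // matrices.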
// mul(scalar, vector)
{
uint32_t elemCount = 0;
if (isScalarType(arg0Type) && isVectorType(arg1Type, nullptr, &elemCount)) {
auto *arg1Id = doExpr(arg1);
// We can use OpVectorTimesScalar if arguments are floats.
if (arg0Type->isFloatingType())
return spvBuilder.createBinaryOp(spv::Op::OpVectorTimesScalar,
returnType, arg1Id, doExpr(arg0), loc,
range);
// Use OpIMul for integers
return spvBuilder.createBinaryOp(spv::Op::OpIMul, returnType,
createVectorSplat(arg0, elemCount),
arg1Id, loc, range);
}
}
// mul(vector, scalar)
{
uint32_t elemCount = 0;
if (isVectorType(arg0Type, nullptr, &elemCount) && isScalarType(arg1Type)) {
auto *arg0Id = doExpr(arg0);
// We can use OpVectorTimesScalar if arguments are floats.
if (arg1Type->isFloatingType())
return spvBuilder.createBinaryOp(spv::Op::OpVectorTimesScalar,
returnType, arg0Id, doExpr(arg1), loc,
range);
// Use OpIMul for integers
return spvBuilder.createBinaryOp(spv::Op::OpIMul, returnType, arg0Id,
createVectorSplat(arg1, elemCount), loc,
range);
}
}
// mul(vector, vector)
if (isVectorType(arg0Type) && isVectorType(arg1Type)) {
// mul( Mat(1xM), Mat(Mx1) ) results in a scalar (same as dot product)
if (isScalarType(returnType)) {
return processIntrinsicDot(callExpr);
}
    // mul( Mat(Mx1), Mat(1xN) ) results in an MxN matrix.
QualType elemType = {};
uint32_t numRows = 0;
if (isMxNMatrix(returnType, &elemType, &numRows)) {
llvm::SmallVector<SpirvInstruction *, 4> rows;
auto *arg0Id = doExpr(arg0);
auto *arg1Id = doExpr(arg1);
for (uint32_t i = 0; i < numRows; ++i) {
auto *scalar = spvBuilder.createCompositeExtract(elemType, arg0Id, {i},
loc, range);
rows.push_back(spvBuilder.createBinaryOp(spv::Op::OpVectorTimesScalar,
arg1Type, arg1Id, scalar, loc,
range));
}
return spvBuilder.createCompositeConstruct(returnType, rows, loc, range);
}
llvm_unreachable("bad arguments passed to mul");
}
// All the following cases require handling arg0 and arg1 expressions first.
auto *arg0Id = doExpr(arg0);
auto *arg1Id = doExpr(arg1);
// mul(scalar, scalar)
if (isScalarType(arg0Type) && isScalarType(arg1Type))
return spvBuilder.createBinaryOp(translateOp(BO_Mul, arg0Type), returnType,
arg0Id, arg1Id, loc, range);
// mul(scalar, matrix)
{
QualType elemType = {};
if (isScalarType(arg0Type) && isMxNMatrix(arg1Type, &elemType)) {
// OpMatrixTimesScalar can only be used if *both* the matrix element type
// and the scalar type are float.
if (arg0Type->isFloatingType() && elemType->isFloatingType())
return spvBuilder.createBinaryOp(spv::Op::OpMatrixTimesScalar,
returnType, arg1Id, arg0Id, loc,
range);
else
return processNonFpScalarTimesMatrix(arg0Type, arg0Id, arg1Type, arg1Id,
callExpr->getExprLoc(), range);
}
}
// mul(matrix, scalar)
{
QualType elemType = {};
if (isScalarType(arg1Type) && isMxNMatrix(arg0Type, &elemType)) {
// OpMatrixTimesScalar can only be used if *both* the matrix element type
// and the scalar type are float.
if (arg1Type->isFloatingType() && elemType->isFloatingType())
return spvBuilder.createBinaryOp(spv::Op::OpMatrixTimesScalar,
returnType, arg0Id, arg1Id, loc,
range);
else
return processNonFpScalarTimesMatrix(arg1Type, arg1Id, arg0Type, arg0Id,
callExpr->getExprLoc(), range);
}
}
// mul(vector, matrix)
{
QualType vecElemType = {}, matElemType = {};
uint32_t elemCount = 0, numRows = 0;
if (isVectorType(arg0Type, &vecElemType, &elemCount) &&
isMxNMatrix(arg1Type, &matElemType, &numRows)) {
assert(elemCount == numRows);
if (vecElemType->isFloatingType() && matElemType->isFloatingType())
return spvBuilder.createBinaryOp(spv::Op::OpMatrixTimesVector,
returnType, arg1Id, arg0Id, loc,
range);
else
return processNonFpVectorTimesMatrix(arg0Type, arg0Id, arg1Type, arg1Id,
callExpr->getExprLoc(), nullptr,
range);
}
}
// mul(matrix, vector)
{
QualType vecElemType = {}, matElemType = {};
uint32_t elemCount = 0, numCols = 0;
if (isMxNMatrix(arg0Type, &matElemType, nullptr, &numCols) &&
isVectorType(arg1Type, &vecElemType, &elemCount)) {
assert(elemCount == numCols);
if (vecElemType->isFloatingType() && matElemType->isFloatingType())
return spvBuilder.createBinaryOp(spv::Op::OpVectorTimesMatrix,
returnType, arg1Id, arg0Id, loc,
range);
else
return processNonFpMatrixTimesVector(arg0Type, arg0Id, arg1Type, arg1Id,
callExpr->getExprLoc(), range);
}
}
// mul(matrix, matrix)
{
// The front-end ensures that the two matrix element types match.
QualType elemType = {};
uint32_t lhsCols = 0, rhsRows = 0;
if (isMxNMatrix(arg0Type, &elemType, nullptr, &lhsCols) &&
isMxNMatrix(arg1Type, nullptr, &rhsRows, nullptr)) {
assert(lhsCols == rhsRows);
if (elemType->isFloatingType())
return spvBuilder.createBinaryOp(spv::Op::OpMatrixTimesMatrix,
returnType, arg1Id, arg0Id, loc,
range);
else
return processNonFpMatrixTimesMatrix(arg0Type, arg0Id, arg1Type, arg1Id,
callExpr->getExprLoc(), range);
}
}
emitError("invalid argument type passed to mul intrinsic function",
callExpr->getExprLoc());
return nullptr;
}
SpirvInstruction *
SpirvEmitter::processIntrinsicPrintf(const CallExpr *callExpr) {
// C99, s6.5.2.2/6: "If the expression that denotes the called function has a
// type that does not include a prototype, the integer promotions are
// performed on each argument, and arguments that have type float are promoted
// to double. These are called the default argument promotions."
// C++: All the variadic parameters undergo default promotions before they're
// received by the function.
//
// Therefore by default floating point arguments will be evaluated as double
// by this function.
//
// TODO: We may want to change this behavior for SPIR-V.
const auto returnType = callExpr->getType();
const auto numArgs = callExpr->getNumArgs();
const auto loc = callExpr->getExprLoc();
assert(numArgs >= 1u);
llvm::SmallVector<SpirvInstruction *, 4> args;
for (uint32_t argIndex = 0; argIndex < numArgs; ++argIndex)
args.push_back(doExpr(callExpr->getArg(argIndex)));
return spvBuilder.createNonSemanticDebugPrintfExtInst(
returnType, NonSemanticDebugPrintfDebugPrintf, args, loc);
}
SpirvInstruction *SpirvEmitter::processIntrinsicDot(const CallExpr *callExpr) {
// Get the function parameters. Expect 2 vectors as parameters.
assert(callExpr->getNumArgs() == 2u);
const Expr *arg0 = callExpr->getArg(0);
const Expr *arg1 = callExpr->getArg(1);
auto *arg0Id = doExpr(arg0);
auto *arg1Id = doExpr(arg1);
QualType arg0Type = arg0->getType();
QualType arg1Type = arg1->getType();
uint32_t vec0Size = 0, vec1Size = 0;
QualType vec0ComponentType = {}, vec1ComponentType = {};
QualType returnType = {};
const bool arg0isScalarOrVec =
isScalarOrVectorType(arg0Type, &vec0ComponentType, &vec0Size);
const bool arg1isScalarOrVec =
isScalarOrVectorType(arg1Type, &vec1ComponentType, &vec1Size);
const bool returnIsScalar = isScalarType(callExpr->getType(), &returnType);
// Each argument should either be a vector or a scalar
assert(arg0isScalarOrVec && arg1isScalarOrVec);
// The result type must be a scalar.
assert(returnIsScalar);
// The element type of each argument must be the same.
assert(vec0ComponentType == vec1ComponentType);
// The size of the two arguments must be equal.
assert(vec0Size == vec1Size);
// Acceptable vector sizes are 1,2,3,4.
assert(vec0Size >= 1 && vec0Size <= 4);
(void)arg0isScalarOrVec;
(void)arg1isScalarOrVec;
(void)returnIsScalar;
(void)vec0ComponentType;
(void)vec1ComponentType;
(void)vec1Size;
auto loc = callExpr->getLocStart();
auto range = callExpr->getSourceRange();
  // According to the HLSL reference, the dot function only works on integers
// and floats.
assert(returnType->isFloatingType() || returnType->isIntegerType());
// Special case: dot product of two vectors, each of size 1. That is
// basically the same as regular multiplication of 2 scalars.
if (vec0Size == 1) {
const spv::Op spvOp = translateOp(BO_Mul, arg0Type);
return spvBuilder.createBinaryOp(spvOp, returnType, arg0Id, arg1Id, loc,
range);
}
// If the vectors are of type Float, we can use OpDot.
if (returnType->isFloatingType()) {
return spvBuilder.createBinaryOp(spv::Op::OpDot, returnType, arg0Id, arg1Id,
loc, range);
}
// Vector component type is Integer (signed or unsigned).
// Create all instructions necessary to perform a dot product on
// two integer vectors. SPIR-V OpDot does not support integer vectors.
// Therefore, we use other SPIR-V instructions (addition and
// multiplication).
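  // For example (illustrative): dot(int3(a0, a1, a2), int3(b0, b1, b2))
  // lowers to a0*b0 + a1*b1 + a2*b2 using OpIMul and OpIAdd.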
else {
SpirvInstruction *result = nullptr;
llvm::SmallVector<SpirvInstruction *, 4> multIds;
const spv::Op multSpvOp = translateOp(BO_Mul, arg0Type);
const spv::Op addSpvOp = translateOp(BO_Add, arg0Type);
// Extract members from the two vectors and multiply them.
for (unsigned int i = 0; i < vec0Size; ++i) {
auto *vec0member = spvBuilder.createCompositeExtract(
returnType, arg0Id, {i}, arg0->getLocStart(), range);
auto *vec1member = spvBuilder.createCompositeExtract(
returnType, arg1Id, {i}, arg1->getLocStart(), range);
auto *multId = spvBuilder.createBinaryOp(
multSpvOp, returnType, vec0member, vec1member, loc, range);
multIds.push_back(multId);
}
// Add all the multiplications.
result = multIds[0];
for (unsigned int i = 1; i < vec0Size; ++i) {
auto *additionId = spvBuilder.createBinaryOp(addSpvOp, returnType, result,
multIds[i], loc, range);
result = additionId;
}
return result;
}
}
SpirvInstruction *SpirvEmitter::processIntrinsicRcp(const CallExpr *callExpr) {
// 'rcp' takes only 1 argument that is a scalar, vector, or matrix of type
// float or double.
assert(callExpr->getNumArgs() == 1u);
const QualType returnType = callExpr->getType();
const Expr *arg = callExpr->getArg(0);
auto *argId = doExpr(arg);
const QualType argType = arg->getType();
auto loc = callExpr->getLocStart();
auto range = callExpr->getSourceRange();
// For cases with matrix argument.
QualType elemType = {};
uint32_t numRows = 0, numCols = 0;
if (isMxNMatrix(argType, &elemType, &numRows, &numCols)) {
auto *vecOne = getVecValueOne(elemType, numCols);
const auto actOnEachVec =
[this, vecOne, loc, range](uint32_t /*index*/, QualType inType,
QualType outType, SpirvInstruction *curRow) {
return spvBuilder.createBinaryOp(spv::Op::OpFDiv, outType, vecOne,
curRow, loc, range);
};
return processEachVectorInMatrix(arg, argId, actOnEachVec, loc, range);
}
// For cases with scalar or vector arguments.
return spvBuilder.createBinaryOp(spv::Op::OpFDiv, returnType,
getValueOne(argType), argId, loc, range);
}
SpirvInstruction *
SpirvEmitter::processIntrinsicReadClock(const CallExpr *callExpr) {
auto *scope = doExpr(callExpr->getArg(0));
assert(scope->getAstResultType()->isIntegerType());
return spvBuilder.createReadClock(scope, callExpr->getExprLoc());
}
SpirvInstruction *
SpirvEmitter::processIntrinsicAllOrAny(const CallExpr *callExpr,
spv::Op spvOp) {
// 'all' and 'any' take only 1 parameter.
assert(callExpr->getNumArgs() == 1u);
const QualType returnType = callExpr->getType();
const Expr *arg = callExpr->getArg(0);
const QualType argType = arg->getType();
const auto loc = callExpr->getExprLoc();
const auto range = callExpr->getSourceRange();
// Handle scalars, vectors of size 1, and 1x1 matrices as arguments.
// Optimization: can directly cast them to boolean. No need for OpAny/OpAll.
{
QualType scalarType = {};
if (isScalarType(argType, &scalarType) &&
(scalarType->isBooleanType() || scalarType->isFloatingType() ||
scalarType->isIntegerType()))
return castToBool(doExpr(arg), argType, returnType, loc, range);
}
// Handle vectors larger than 1, Mx1 matrices, and 1xN matrices as arguments.
// Cast the vector to a boolean vector, then run OpAny/OpAll on it.
{
QualType elemType = {};
uint32_t size = 0;
if (isVectorType(argType, &elemType, &size)) {
const QualType castToBoolType =
astContext.getExtVectorType(returnType, size);
auto *castedToBool =
castToBool(doExpr(arg), argType, castToBoolType, loc, range);
return spvBuilder.createUnaryOp(spvOp, returnType, castedToBool, loc,
range);
}
}
// Handle MxN matrices as arguments.
{
QualType elemType = {};
uint32_t matRowCount = 0, matColCount = 0;
if (isMxNMatrix(argType, &elemType, &matRowCount, &matColCount)) {
auto *matrix = doExpr(arg);
const QualType vecType = getComponentVectorType(astContext, argType);
llvm::SmallVector<SpirvInstruction *, 4> rowResults;
for (uint32_t i = 0; i < matRowCount; ++i) {
// Extract the row which is a float vector of size matColCount.
auto *rowFloatVec = spvBuilder.createCompositeExtract(
vecType, matrix, {i}, arg->getLocStart(), range);
// Cast the float vector to boolean vector.
const auto rowFloatQualType =
astContext.getExtVectorType(elemType, matColCount);
const auto rowBoolQualType =
astContext.getExtVectorType(returnType, matColCount);
auto *rowBoolVec =
castToBool(rowFloatVec, rowFloatQualType, rowBoolQualType,
arg->getLocStart(), range);
// Perform OpAny/OpAll on the boolean vector.
rowResults.push_back(spvBuilder.createUnaryOp(spvOp, returnType,
rowBoolVec, loc, range));
}
// Create a new vector that is the concatenation of results of all rows.
const QualType vecOfBools =
astContext.getExtVectorType(astContext.BoolTy, matRowCount);
auto *row = spvBuilder.createCompositeConstruct(vecOfBools, rowResults,
loc, range);
// Run OpAny/OpAll on the newly-created vector.
return spvBuilder.createUnaryOp(spvOp, returnType, row, loc, range);
}
}
// All types should be handled already.
llvm_unreachable("Unknown argument type passed to all()/any().");
return nullptr;
}
SpirvInstruction *
SpirvEmitter::processIntrinsicAsType(const CallExpr *callExpr) {
// This function handles the following intrinsics:
// 'asint'
// 'asint16'
// 'asuint'
// 'asuint16'
// 'asfloat'
// 'asfloat16'
// 'asdouble'
// Note: The logic for the 32-bit and 16-bit variants of these functions is
// identical so we don't bother distinguishing between related types
// like float and float16 in the comments.
// Method 1: ret asint(arg)
// arg component type = {float, uint}
// arg template type = {scalar, vector, matrix}
// ret template type = same as arg template type.
// ret component type = int
// Method 2: ret asuint(arg)
// arg component type = {float, int}
// arg template type = {scalar, vector, matrix}
// ret template type = same as arg template type.
// ret component type = uint
// Method 3: ret asfloat(arg)
// arg component type = {float, uint, int}
// arg template type = {scalar, vector, matrix}
// ret template type = same as arg template type.
// ret component type = float
// Method 4: double asdouble(uint lowbits, uint highbits)
// Method 5: double2 asdouble(uint2 lowbits, uint2 highbits)
// Method 6:
// void asuint(
// in double value,
// out uint lowbits,
// out uint highbits
// );
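  // For example (illustrative), Method 5 interleaves the two uint2 inputs
  // with a {0, 2, 1, 3} vector shuffle into (low.x, high.x, low.y, high.y)
  // and then bitcasts the resulting uint4 to a double2.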
const QualType returnType = callExpr->getType();
const uint32_t numArgs = callExpr->getNumArgs();
const Expr *arg0 = callExpr->getArg(0);
const QualType argType = arg0->getType();
const auto loc = callExpr->getExprLoc();
const auto range = callExpr->getSourceRange();
// Method 3 return type may be the same as arg type, so it would be a no-op.
if (isSameType(astContext, returnType, argType))
return doExpr(arg0);
switch (numArgs) {
case 1: {
// Handling Method 1, 2, and 3.
auto *argInstr = loadIfGLValue(arg0);
QualType fromElemType = {};
uint32_t numRows = 0, numCols = 0;
    // For non-matrix arguments (scalar or vector), just do an OpBitcast.
if (!isMxNMatrix(argType, &fromElemType, &numRows, &numCols)) {
return spvBuilder.createUnaryOp(spv::Op::OpBitcast, returnType, argInstr,
loc, range);
}
    // The argument is a matrix, so the return type is a matrix as well.
const QualType toElemType = hlsl::GetHLSLMatElementType(returnType);
llvm::SmallVector<SpirvInstruction *, 4> castedRows;
const auto fromVecType = astContext.getExtVectorType(fromElemType, numCols);
const auto toVecType = astContext.getExtVectorType(toElemType, numCols);
for (uint32_t row = 0; row < numRows; ++row) {
auto *rowInstr = spvBuilder.createCompositeExtract(
fromVecType, argInstr, {row}, arg0->getLocStart(), range);
castedRows.push_back(spvBuilder.createUnaryOp(
spv::Op::OpBitcast, toVecType, rowInstr, loc, range));
}
return spvBuilder.createCompositeConstruct(returnType, castedRows, loc,
range);
}
case 2: {
auto *lowbits = doExpr(arg0);
auto *highbits = doExpr(callExpr->getArg(1));
const auto uintType = astContext.UnsignedIntTy;
const auto doubleType = astContext.DoubleTy;
// Handling Method 4
if (argType->isUnsignedIntegerType()) {
const auto uintVec2Type = astContext.getExtVectorType(uintType, 2);
auto *operand = spvBuilder.createCompositeConstruct(
uintVec2Type, {lowbits, highbits}, loc, range);
return spvBuilder.createUnaryOp(spv::Op::OpBitcast, doubleType, operand,
loc, range);
}
// Handling Method 5
else {
const auto uintVec4Type = astContext.getExtVectorType(uintType, 4);
const auto doubleVec2Type = astContext.getExtVectorType(doubleType, 2);
auto *operand = spvBuilder.createVectorShuffle(
uintVec4Type, lowbits, highbits, {0, 2, 1, 3}, loc, range);
return spvBuilder.createUnaryOp(spv::Op::OpBitcast, doubleVec2Type,
operand, loc, range);
}
}
case 3: {
// Handling Method 6.
auto *value = doExpr(arg0);
auto *lowbits = doExpr(callExpr->getArg(1));
auto *highbits = doExpr(callExpr->getArg(2));
const auto uintType = astContext.UnsignedIntTy;
const auto uintVec2Type = astContext.getExtVectorType(uintType, 2);
auto *vecResult = spvBuilder.createUnaryOp(spv::Op::OpBitcast, uintVec2Type,
value, loc, range);
spvBuilder.createStore(
lowbits,
spvBuilder.createCompositeExtract(uintType, vecResult, {0},
arg0->getLocStart(), range),
loc, range);
spvBuilder.createStore(
highbits,
spvBuilder.createCompositeExtract(uintType, vecResult, {1},
arg0->getLocStart(), range),
loc, range);
return nullptr;
}
default:
emitError("unrecognized signature for %0 intrinsic function", loc)
<< getFunctionOrOperatorName(callExpr->getDirectCallee(), true);
return nullptr;
}
}
SpirvInstruction *
SpirvEmitter::processD3DCOLORtoUBYTE4(const CallExpr *callExpr) {
  // Takes a float4 and returns an int4 by computing:
  // int4 result = input.zyxw * 255.001953;
  // At 32-bit float precision the scaling factor becomes 255.002.
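  // For example (illustrative): an input of float4(1, 0, 0.5, 1) is swizzled
  // to (0.5, 0, 1, 1), scaled to (127.501, 0, 255.002, 255.002), and
  // truncated to int4(127, 0, 255, 255).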
const auto arg = callExpr->getArg(0);
auto *argId = doExpr(arg);
const auto argType = arg->getType();
auto loc = callExpr->getLocStart();
auto range = callExpr->getSourceRange();
auto *swizzle = spvBuilder.createVectorShuffle(argType, argId, argId,
{2, 1, 0, 3}, loc, range);
auto *scaled = spvBuilder.createBinaryOp(
spv::Op::OpVectorTimesScalar, argType, swizzle,
spvBuilder.getConstantFloat(astContext.FloatTy, llvm::APFloat(255.002f)),
loc, range);
return castToInt(scaled, arg->getType(), callExpr->getType(), loc, range);
}
SpirvInstruction *
SpirvEmitter::processIntrinsicIsFinite(const CallExpr *callExpr) {
// Since OpIsFinite needs the Kernel capability, translation is instead done
// using OpIsNan and OpIsInf:
// isFinite = !(isNan || isInf)
const auto arg = doExpr(callExpr->getArg(0));
const auto returnType = callExpr->getType();
const auto loc = callExpr->getExprLoc();
const auto range = callExpr->getSourceRange();
const auto isNan =
spvBuilder.createUnaryOp(spv::Op::OpIsNan, returnType, arg, loc, range);
const auto isInf =
spvBuilder.createUnaryOp(spv::Op::OpIsInf, returnType, arg, loc, range);
const auto isNanOrInf = spvBuilder.createBinaryOp(
spv::Op::OpLogicalOr, returnType, isNan, isInf, loc, range);
return spvBuilder.createUnaryOp(spv::Op::OpLogicalNot, returnType, isNanOrInf,
loc, range);
}
SpirvInstruction *
SpirvEmitter::processIntrinsicSinCos(const CallExpr *callExpr) {
// Since there is no sincos equivalent in SPIR-V, we need to perform Sin
// once and Cos once. We can reuse existing Sine/Cosine handling functions.
CallExpr *sincosExpr =
new (astContext) CallExpr(astContext, Stmt::StmtClass::NoStmtClass, {});
sincosExpr->setType(callExpr->getArg(0)->getType());
sincosExpr->setNumArgs(astContext, 1);
sincosExpr->setArg(0, const_cast<Expr *>(callExpr->getArg(0)));
const auto srcLoc = callExpr->getExprLoc();
const auto srcRange = callExpr->getSourceRange();
// Perform Sin and store results in argument 1.
auto *sin = processIntrinsicUsingGLSLInst(
sincosExpr, GLSLstd450::GLSLstd450Sin,
/*actPerRowForMatrices*/ true, srcLoc, srcRange);
spvBuilder.createStore(doExpr(callExpr->getArg(1)), sin, srcLoc, srcRange);
// Perform Cos and store results in argument 2.
auto *cos = processIntrinsicUsingGLSLInst(
sincosExpr, GLSLstd450::GLSLstd450Cos,
/*actPerRowForMatrices*/ true, srcLoc, srcRange);
spvBuilder.createStore(doExpr(callExpr->getArg(2)), cos, srcLoc, srcRange);
return nullptr;
}
SpirvInstruction *
SpirvEmitter::processIntrinsicSaturate(const CallExpr *callExpr) {
const auto *arg = callExpr->getArg(0);
const auto loc = callExpr->getExprLoc();
const auto range = callExpr->getSourceRange();
auto *argId = doExpr(arg);
const auto argType = arg->getType();
const QualType returnType = callExpr->getType();
QualType elemType = {};
uint32_t vecSize = 0;
if (isScalarType(argType, &elemType)) {
auto *floatZero = getValueZero(elemType);
auto *floatOne = getValueOne(elemType);
return spvBuilder.createGLSLExtInst(
returnType, GLSLstd450::GLSLstd450FClamp, {argId, floatZero, floatOne},
loc, range);
}
if (isVectorType(argType, &elemType, &vecSize)) {
auto *vecZero = getVecValueZero(elemType, vecSize);
auto *vecOne = getVecValueOne(elemType, vecSize);
return spvBuilder.createGLSLExtInst(returnType,
GLSLstd450::GLSLstd450FClamp,
{argId, vecZero, vecOne}, loc, range);
}
uint32_t numRows = 0, numCols = 0;
if (isMxNMatrix(argType, &elemType, &numRows, &numCols)) {
auto *vecZero = getVecValueZero(elemType, numCols);
auto *vecOne = getVecValueOne(elemType, numCols);
const auto actOnEachVec = [this, loc, vecZero, vecOne, range](
uint32_t /*index*/, QualType inType,
QualType outType, SpirvInstruction *curRow) {
return spvBuilder.createGLSLExtInst(outType, GLSLstd450::GLSLstd450FClamp,
{curRow, vecZero, vecOne}, loc,
range);
};
return processEachVectorInMatrix(arg, argId, actOnEachVec, loc, range);
}
emitError("invalid argument type passed to saturate intrinsic function",
callExpr->getExprLoc());
return nullptr;
}
SpirvInstruction *
SpirvEmitter::processIntrinsicFloatSign(const CallExpr *callExpr) {
// Import the GLSL.std.450 extended instruction set.
const Expr *arg = callExpr->getArg(0);
const auto loc = callExpr->getExprLoc();
const auto range = callExpr->getSourceRange();
const QualType returnType = callExpr->getType();
const QualType argType = arg->getType();
assert(isFloatOrVecMatOfFloatType(argType));
auto *argId = doExpr(arg);
SpirvInstruction *floatSign = nullptr;
// For matrices, we can perform the instruction on each vector of the matrix.
if (isMxNMatrix(argType)) {
const auto actOnEachVec = [this, loc, range](
uint32_t /*index*/, QualType inType,
QualType outType, SpirvInstruction *curRow) {
return spvBuilder.createGLSLExtInst(outType, GLSLstd450::GLSLstd450FSign,
{curRow}, loc, range);
};
floatSign = processEachVectorInMatrix(arg, argId, actOnEachVec, loc, range);
} else {
floatSign = spvBuilder.createGLSLExtInst(
argType, GLSLstd450::GLSLstd450FSign, {argId}, loc, range);
}
return castToInt(floatSign, arg->getType(), returnType, arg->getLocStart());
}
SpirvInstruction *
SpirvEmitter::processIntrinsicF16ToF32(const CallExpr *callExpr) {
// f16tof32() takes in (vector of) uint and returns (vector of) float.
// The frontend should guarantee that by inserting implicit casts.
const QualType f32Type = astContext.FloatTy;
const QualType u32Type = astContext.UnsignedIntTy;
const QualType v2f32Type = astContext.getExtVectorType(f32Type, 2);
const auto loc = callExpr->getExprLoc();
const auto range = callExpr->getSourceRange();
const auto *arg = callExpr->getArg(0);
auto *argId = doExpr(arg);
uint32_t elemCount = {};
if (isVectorType(arg->getType(), nullptr, &elemCount)) {
// The input is a vector. We need to handle each element separately.
llvm::SmallVector<SpirvInstruction *, 4> elements;
for (uint32_t i = 0; i < elemCount; ++i) {
auto *srcElem = spvBuilder.createCompositeExtract(
u32Type, argId, {i}, arg->getLocStart(), range);
auto *convert = spvBuilder.createGLSLExtInst(
v2f32Type, GLSLstd450::GLSLstd450UnpackHalf2x16, srcElem, loc, range);
elements.push_back(
spvBuilder.createCompositeExtract(f32Type, convert, {0}, loc, range));
}
return spvBuilder.createCompositeConstruct(
astContext.getExtVectorType(f32Type, elemCount), elements, loc, range);
}
auto *convert = spvBuilder.createGLSLExtInst(
v2f32Type, GLSLstd450::GLSLstd450UnpackHalf2x16, argId, loc, range);
  // f16tof32() converts the float16 stored in the low half of the uint to
  // a float, so we just need to return the first component.
return spvBuilder.createCompositeExtract(f32Type, convert, {0}, loc, range);
}
SpirvInstruction *
SpirvEmitter::processIntrinsicF32ToF16(const CallExpr *callExpr) {
// f32tof16() takes in (vector of) float and returns (vector of) uint.
// The frontend should guarantee that by inserting implicit casts.
const QualType f32Type = astContext.FloatTy;
const QualType u32Type = astContext.UnsignedIntTy;
const QualType v2f32Type = astContext.getExtVectorType(f32Type, 2);
auto *zero = spvBuilder.getConstantFloat(f32Type, llvm::APFloat(0.0f));
const auto loc = callExpr->getExprLoc();
const auto range = callExpr->getSourceRange();
const auto *arg = callExpr->getArg(0);
auto *argId = doExpr(arg);
uint32_t elemCount = {};
if (isVectorType(arg->getType(), nullptr, &elemCount)) {
// The input is a vector. We need to handle each element separately.
llvm::SmallVector<SpirvInstruction *, 4> elements;
for (uint32_t i = 0; i < elemCount; ++i) {
auto *srcElem = spvBuilder.createCompositeExtract(
f32Type, argId, {i}, arg->getLocStart(), range);
auto *srcVec = spvBuilder.createCompositeConstruct(
v2f32Type, {srcElem, zero}, loc, range);
elements.push_back(spvBuilder.createGLSLExtInst(
u32Type, GLSLstd450::GLSLstd450PackHalf2x16, srcVec, loc, range));
}
return spvBuilder.createCompositeConstruct(
astContext.getExtVectorType(u32Type, elemCount), elements, loc, range);
}
  // f32tof16() stores the converted value in the low half of the uint, so we
  // need to supply a zero for the other half.
auto *srcVec =
spvBuilder.createCompositeConstruct(v2f32Type, {argId, zero}, loc, range);
return spvBuilder.createGLSLExtInst(
u32Type, GLSLstd450::GLSLstd450PackHalf2x16, srcVec, loc, range);
}
SpirvInstruction *SpirvEmitter::processIntrinsicUsingSpirvInst(
const CallExpr *callExpr, spv::Op opcode, bool actPerRowForMatrices) {
  // The derivative opcodes are only allowed in pixel shaders, or in compute
  // shaders when the SPV_NV_compute_shader_derivatives extension is enabled.
if (!spvContext.isPS()) {
// For cases where the instructions are known to be invalid, we turn on
// legalization expecting the invalid use to be optimized away. For compute
// shaders, we add the execution mode to enable the derivatives. We legalize
// in this case as well because that is what we did before the extension was
    // used, and we do not want to change previous behavior too much.
switch (opcode) {
case spv::Op::OpDPdx:
case spv::Op::OpDPdy:
case spv::Op::OpDPdxFine:
case spv::Op::OpDPdyFine:
case spv::Op::OpDPdxCoarse:
case spv::Op::OpDPdyCoarse:
case spv::Op::OpFwidth:
case spv::Op::OpFwidthFine:
case spv::Op::OpFwidthCoarse:
if (spvContext.isCS())
addDerivativeGroupExecutionMode();
needsLegalization = true;
break;
default:
// Only the given opcodes need legalization and the execution mode.
break;
}
}
const auto loc = callExpr->getExprLoc();
const auto range = callExpr->getSourceRange();
const QualType returnType = callExpr->getType();
if (callExpr->getNumArgs() == 1u) {
const Expr *arg = callExpr->getArg(0);
auto *argId = doExpr(arg);
// If the instruction does not operate on matrices, we can perform the
// instruction on each vector of the matrix.
if (actPerRowForMatrices && isMxNMatrix(arg->getType())) {
assert(isMxNMatrix(returnType));
const auto actOnEachVec = [this, opcode, loc,
range](uint32_t /*index*/, QualType inType,
QualType outType,
SpirvInstruction *curRow) {
return spvBuilder.createUnaryOp(opcode, outType, curRow, loc, range);
};
return processEachVectorInMatrix(arg, returnType, argId, actOnEachVec,
loc, range);
}
return spvBuilder.createUnaryOp(opcode, returnType, argId, loc, range);
} else if (callExpr->getNumArgs() == 2u) {
const Expr *arg0 = callExpr->getArg(0);
auto *arg0Id = doExpr(arg0);
auto *arg1Id = doExpr(callExpr->getArg(1));
const auto arg1Loc = callExpr->getArg(1)->getLocStart();
const auto arg1Range = callExpr->getArg(1)->getSourceRange();
// If the instruction does not operate on matrices, we can perform the
// instruction on each vector of the matrix.
if (actPerRowForMatrices && isMxNMatrix(arg0->getType())) {
const auto actOnEachVec = [this, opcode, arg1Id, loc, range, arg1Loc,
arg1Range](uint32_t index, QualType inType,
QualType outType,
SpirvInstruction *arg0Row) {
auto *arg1Row = spvBuilder.createCompositeExtract(
inType, arg1Id, {index}, arg1Loc, arg1Range);
return spvBuilder.createBinaryOp(opcode, outType, arg0Row, arg1Row, loc,
range);
};
return processEachVectorInMatrix(arg0, arg0Id, actOnEachVec, loc, range);
}
return spvBuilder.createBinaryOp(opcode, returnType, arg0Id, arg1Id, loc,
range);
}
emitError("unsupported %0 intrinsic function", loc)
<< cast<DeclRefExpr>(callExpr->getCallee())->getNameInfo().getAsString();
return nullptr;
}
SpirvInstruction *SpirvEmitter::processIntrinsicUsingGLSLInst(
const CallExpr *callExpr, GLSLstd450 opcode, bool actPerRowForMatrices,
SourceLocation loc, SourceRange range) {
// Import the GLSL.std.450 extended instruction set.
const QualType returnType = callExpr->getType();
if (callExpr->getNumArgs() == 1u) {
const Expr *arg = callExpr->getArg(0);
auto *argInstr = doExpr(arg);
// If the instruction does not operate on matrices, we can perform the
// instruction on each vector of the matrix.
if (actPerRowForMatrices && isMxNMatrix(arg->getType())) {
const auto actOnEachVec = [this, loc, range,
opcode](uint32_t /*index*/, QualType inType,
QualType outType,
SpirvInstruction *curRowInstr) {
return spvBuilder.createGLSLExtInst(outType, opcode, {curRowInstr}, loc,
range);
};
return processEachVectorInMatrix(arg, argInstr, actOnEachVec, loc, range);
}
return spvBuilder.createGLSLExtInst(returnType, opcode, {argInstr}, loc,
range);
} else if (callExpr->getNumArgs() == 2u) {
const Expr *arg0 = callExpr->getArg(0);
auto *arg0Instr = doExpr(arg0);
auto *arg1Instr = doExpr(callExpr->getArg(1));
const auto arg1Loc = callExpr->getArg(1)->getLocStart();
const auto arg1Range = callExpr->getArg(1)->getSourceRange();
// If the instruction does not operate on matrices, we can perform the
// instruction on each vector of the matrix.
if (actPerRowForMatrices && isMxNMatrix(arg0->getType())) {
const auto actOnEachVec = [this, loc, range, opcode, arg1Instr, arg1Range,
arg1Loc](uint32_t index, QualType inType,
QualType outType,
SpirvInstruction *arg0RowInstr) {
auto *arg1RowInstr = spvBuilder.createCompositeExtract(
inType, arg1Instr, {index}, arg1Loc, arg1Range);
return spvBuilder.createGLSLExtInst(
outType, opcode, {arg0RowInstr, arg1RowInstr}, loc, range);
};
return processEachVectorInMatrix(arg0, arg0Instr, actOnEachVec, loc,
range);
}
return spvBuilder.createGLSLExtInst(returnType, opcode,
{arg0Instr, arg1Instr}, loc, range);
} else if (callExpr->getNumArgs() == 3u) {
const Expr *arg0 = callExpr->getArg(0);
auto *arg0Instr = doExpr(arg0);
auto *arg1Instr = doExpr(callExpr->getArg(1));
auto *arg2Instr = doExpr(callExpr->getArg(2));
auto arg1Loc = callExpr->getArg(1)->getLocStart();
auto arg2Loc = callExpr->getArg(2)->getLocStart();
const auto arg1Range = callExpr->getArg(1)->getSourceRange();
const auto arg2Range = callExpr->getArg(2)->getSourceRange();
// If the instruction does not operate on matrices, we can perform the
// instruction on each vector of the matrix.
if (actPerRowForMatrices && isMxNMatrix(arg0->getType())) {
const auto actOnEachVec = [this, loc, range, opcode, arg1Instr, arg2Instr,
arg1Loc, arg2Loc, arg1Range,
arg2Range](uint32_t index, QualType inType,
QualType outType,
SpirvInstruction *arg0RowInstr) {
auto *arg1RowInstr = spvBuilder.createCompositeExtract(
inType, arg1Instr, {index}, arg1Loc, arg1Range);
auto *arg2RowInstr = spvBuilder.createCompositeExtract(
inType, arg2Instr, {index}, arg2Loc, arg2Range);
return spvBuilder.createGLSLExtInst(
outType, opcode, {arg0RowInstr, arg1RowInstr, arg2RowInstr}, loc,
range);
};
return processEachVectorInMatrix(arg0, arg0Instr, actOnEachVec, loc,
range);
}
return spvBuilder.createGLSLExtInst(
returnType, opcode, {arg0Instr, arg1Instr, arg2Instr}, loc, range);
}
emitError("unsupported %0 intrinsic function", callExpr->getExprLoc())
<< cast<DeclRefExpr>(callExpr->getCallee())->getNameInfo().getAsString();
return nullptr;
}
SpirvInstruction *
SpirvEmitter::processIntrinsicLog10(const CallExpr *callExpr) {
// Since there is no log10 instruction in SPIR-V, we can use:
// log10(x) = log2(x) * ( 1 / log2(10) )
// 1 / log2(10) = 0.30103
auto loc = callExpr->getExprLoc();
auto range = callExpr->getSourceRange();
const auto returnType = callExpr->getType();
auto scalarType = getElementType(astContext, returnType);
auto *scale =
spvBuilder.getConstantFloat(scalarType, llvm::APFloat(0.30103f));
auto *log2 = processIntrinsicUsingGLSLInst(
callExpr, GLSLstd450::GLSLstd450Log2, true, loc, range);
spv::Op scaleOp = isScalarType(returnType) ? spv::Op::OpFMul
: isVectorType(returnType) ? spv::Op::OpVectorTimesScalar
: spv::Op::OpMatrixTimesScalar;
return spvBuilder.createBinaryOp(scaleOp, returnType, log2, scale, loc,
range);
}
SpirvInstruction *SpirvEmitter::processIntrinsicDP4a(const CallExpr *callExpr,
hlsl::IntrinsicOp op) {
// Processing the `dot4add_i8packed` and `dot4add_u8packed` intrinsics.
// There is no direct substitution for them in SPIR-V, but the combination
  // of OpSDot / OpUDot and OpIAdd works. Note that the OpSDotAccSat and
  // OpUDotAccSat operations do not match the HLSL intrinsics, as there
  // should not be any saturation.
//
// int32 dot4add_i8packed(uint32 a, uint32 b, int32 acc);
// A 4-dimensional signed integer dot-product with add. Multiplies together
// each corresponding pair of signed 8-bit int bytes in the two input
// DWORDs, and sums the results into the 32-bit signed integer accumulator.
//
// uint32 dot4add_u8packed(uint32 a, uint32 b, uint32 acc);
// A 4-dimensional unsigned integer dot-product with add. Multiplies
// together each corresponding pair of unsigned 8-bit int bytes in the two
// input DWORDs, and sums the results into the 32-bit unsigned integer
// accumulator.
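  // Illustrative lowering for dot4add_i8packed(a, b, acc) (names are
  // hypothetical):
  //   %dot = OpSDot %int %a %b PackedVectorFormat4x8Bit
  //   %res = OpIAdd %int %dot %acc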
auto loc = callExpr->getExprLoc();
auto range = callExpr->getSourceRange();
assert(op == hlsl::IntrinsicOp::IOP_dot4add_i8packed ||
op == hlsl::IntrinsicOp::IOP_dot4add_u8packed);
// Validate the argument count - if it's wrong, the compiler won't get
// here anyway, so an assert should be fine.
assert(callExpr->getNumArgs() == 3u);
// Prepare the three arguments.
const Expr *arg0 = callExpr->getArg(0);
const Expr *arg1 = callExpr->getArg(1);
const Expr *arg2 = callExpr->getArg(2);
auto *arg0Instr = doExpr(arg0);
auto *arg1Instr = doExpr(arg1);
auto *arg2Instr = doExpr(arg2);
// OpSDot/OpUDot need a Packed Vector Format operand when Vector 1 and
// Vector 2 are scalar integer types.
SpirvConstant *formatConstant = spvBuilder.getConstantInt(
astContext.UnsignedIntTy,
llvm::APInt(32,
uint32_t(spv::PackedVectorFormat::PackedVectorFormat4x8Bit)));
// Make sure that the format is emitted as a literal constant and not
// an instruction reference.
formatConstant->setLiteral(true);
// Prepare the array inputs for createSpirvIntrInstExt below.
// Need to use this function because the OpSDot/OpUDot operations require
// two capabilities and an extension to be declared in the module.
SpirvInstruction *operands[]{arg0Instr, arg1Instr, formatConstant};
uint32_t capabilities[]{
uint32_t(spv::Capability::DotProduct),
uint32_t(spv::Capability::DotProductInput4x8BitPacked)};
llvm::StringRef extensions[]{"SPV_KHR_integer_dot_product"};
llvm::StringRef instSet = "";
// Pick the opcode based on the instruction.
const bool isSigned = op == hlsl::IntrinsicOp::IOP_dot4add_i8packed;
const spv::Op spirvOp = isSigned ? spv::Op::OpSDot : spv::Op::OpUDot;
const auto returnType = callExpr->getType();
// Create the dot product instruction.
auto *dotResult =
spvBuilder.createSpirvIntrInstExt(uint32_t(spirvOp), returnType, operands,
extensions, instSet, capabilities, loc);
// Create and return the integer addition instruction.
return spvBuilder.createBinaryOp(spv::Op::OpIAdd, returnType, dotResult,
arg2Instr, loc, range);
}
SpirvInstruction *SpirvEmitter::processIntrinsicDP2a(const CallExpr *callExpr) {
// Processing the `dot2add` intrinsic.
  // There is no direct substitution for it in SPIR-V, so it is recreated with a
  // combination of OpDot, OpFConvert, and OpFAdd.
//
// float dot2add( half2 a, half2 b, float acc );
// A 2-dimensional floating point dot product of half2 vectors with add.
// Multiplies the elements of the two half-precision float input vectors
// together and sums the results into the 32-bit float accumulator.
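  // Illustrative lowering (names are hypothetical):
  //   %dot  = OpDot %half %a %b
  //   %conv = OpFConvert %float %dot
  //   %res  = OpFAdd %float %conv %acc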
auto loc = callExpr->getExprLoc();
auto range = callExpr->getSourceRange();
assert(callExpr->getNumArgs() == 3u);
const Expr *arg0 = callExpr->getArg(0);
const Expr *arg1 = callExpr->getArg(1);
const Expr *arg2 = callExpr->getArg(2);
QualType vecType = arg0->getType();
QualType componentType = {};
uint32_t vecSize = {};
bool isVec = isVectorType(vecType, &componentType, &vecSize);
assert(isVec && vecSize == 2);
(void)isVec;
SpirvInstruction *arg0Instr = doExpr(arg0);
SpirvInstruction *arg1Instr = doExpr(arg1);
SpirvInstruction *arg2Instr = doExpr(arg2);
// Create the dot product of the half2 vectors.
SpirvInstruction *dotInstr = spvBuilder.createBinaryOp(
spv::Op::OpDot, componentType, arg0Instr, arg1Instr, loc, range);
// Convert dot product (half type) to result type (float).
QualType resultType = callExpr->getType();
SpirvInstruction *floatDotInstr = spvBuilder.createUnaryOp(
spv::Op::OpFConvert, resultType, dotInstr, loc, range);
// Sum the dot product result and accumulator and return.
return spvBuilder.createBinaryOp(spv::Op::OpFAdd, resultType, floatDotInstr,
arg2Instr, loc, range);
}
SpirvInstruction *
SpirvEmitter::processIntrinsic8BitPack(const CallExpr *callExpr,
hlsl::IntrinsicOp op) {
const auto loc = callExpr->getExprLoc();
assert(op == hlsl::IntrinsicOp::IOP_pack_s8 ||
op == hlsl::IntrinsicOp::IOP_pack_u8 ||
op == hlsl::IntrinsicOp::IOP_pack_clamp_s8 ||
op == hlsl::IntrinsicOp::IOP_pack_clamp_u8);
  // Here are the signatures for the pack intrinsic operations:
//
// uint8_t4_packed pack_u8(uint32_t4 unpackedVal);
// uint8_t4_packed pack_u8(uint16_t4 unpackedVal);
// int8_t4_packed pack_s8(int32_t4 unpackedVal);
// int8_t4_packed pack_s8(int16_t4 unpackedVal);
//
// These functions take a vec4 of 16-bit or 32-bit integers as input. For each
// element of the vec4, they pick the lower 8 bits, and drop the other bits.
// The result is four 8-bit values (32 bits in total) which are packed in an
// unsigned uint32_t.
//
//
  // Here are the signatures for the pack_clamp intrinsic operations:
//
// uint8_t4_packed pack_clamp_u8(int32_t4 val); // Pack and Clamp [0, 255]
// uint8_t4_packed pack_clamp_u8(int16_t4 val); // Pack and Clamp [0, 255]
//
// int8_t4_packed pack_clamp_s8(int32_t4 val); // Pack and Clamp [-128, 127]
// int8_t4_packed pack_clamp_s8(int16_t4 val); // Pack and Clamp [-128, 127]
//
// These functions take a vec4 of 16-bit or 32-bit integers as input. For each
// element of the vec4, they first clamp the value to a range (depending on
// the signedness) then pick the lower 8 bits, and drop the other bits.
// The result is four 8-bit values (32 bits in total) which are packed in an
// unsigned uint32_t.
//
// Note: uint8_t4_packed and int8_t4_packed are NOT vector types! They are
// both scalar 32-bit unsigned integer types where each byte represents one
// value.
//
// Note: In pack_clamp_{s|u}8 intrinsics, an input of 0x100 will be turned
// into 0xFF, not 0x00. Therefore, it is important to perform a clamp first,
// and then a truncation.
// Steps:
// Use GLSL extended instruction set's clamp (only for clamp instructions).
// Use OpUConvert/OpSConvert to truncate each element of the vec4 to 8 bits.
// Use OpBitcast to make a 32-bit uint out of the new vec4.
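  // For example (illustrative): pack_clamp_s8(int32_t4(300, -200, 5, -5))
  // first clamps to (127, -128, 5, -5), truncates each lane to 8 bits with
  // OpSConvert, and bitcasts the resulting byte vector to a single packed
  // 32-bit value.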
auto *arg = callExpr->getArg(0);
const auto argType = arg->getType();
SpirvInstruction *argInstr = doExpr(arg);
QualType elemType = {};
uint32_t elemCount = 0;
(void)isVectorType(argType, &elemType, &elemCount);
const bool isSigned = elemType->isSignedIntegerType();
assert(elemCount == 4);
const bool doesClamp = op == hlsl::IntrinsicOp::IOP_pack_clamp_s8 ||
op == hlsl::IntrinsicOp::IOP_pack_clamp_u8;
if (doesClamp) {
const auto bitwidth = getElementSpirvBitwidth(
astContext, elemType, spirvOptions.enable16BitTypes);
int32_t clampMin = op == hlsl::IntrinsicOp::IOP_pack_clamp_u8 ? 0 : -128;
int32_t clampMax = op == hlsl::IntrinsicOp::IOP_pack_clamp_u8 ? 255 : 127;
auto *minInstr = spvBuilder.getConstantInt(
elemType, llvm::APInt(bitwidth, clampMin, isSigned));
auto *maxInstr = spvBuilder.getConstantInt(
elemType, llvm::APInt(bitwidth, clampMax, isSigned));
auto *minVec = spvBuilder.getConstantComposite(
argType, {minInstr, minInstr, minInstr, minInstr});
auto *maxVec = spvBuilder.getConstantComposite(
argType, {maxInstr, maxInstr, maxInstr, maxInstr});
auto clampOp = isSigned ? GLSLstd450SClamp : GLSLstd450UClamp;
argInstr = spvBuilder.createGLSLExtInst(argType, clampOp,
{argInstr, minVec, maxVec}, loc);
}
if (isSigned) {
QualType v4Int8Type =
astContext.getExtVectorType(astContext.SignedCharTy, 4);
auto *bytesVecInstr = spvBuilder.createUnaryOp(spv::Op::OpSConvert,
v4Int8Type, argInstr, loc);
return spvBuilder.createUnaryOp(
spv::Op::OpBitcast, astContext.Int8_4PackedTy, bytesVecInstr, loc);
} else {
QualType v4Uint8Type =
astContext.getExtVectorType(astContext.UnsignedCharTy, 4);
auto *bytesVecInstr = spvBuilder.createUnaryOp(spv::Op::OpUConvert,
v4Uint8Type, argInstr, loc);
return spvBuilder.createUnaryOp(
spv::Op::OpBitcast, astContext.UInt8_4PackedTy, bytesVecInstr, loc);
}
}
SpirvInstruction *
SpirvEmitter::processIntrinsic8BitUnpack(const CallExpr *callExpr,
hlsl::IntrinsicOp op) {
const auto loc = callExpr->getExprLoc();
assert(op == hlsl::IntrinsicOp::IOP_unpack_s8s16 ||
op == hlsl::IntrinsicOp::IOP_unpack_s8s32 ||
op == hlsl::IntrinsicOp::IOP_unpack_u8u16 ||
op == hlsl::IntrinsicOp::IOP_unpack_u8u32);
  // Here are the signatures for the unpack intrinsic operations:
//
// int16_t4 unpack_s8s16(int8_t4_packed packedVal); // Sign Extended
// uint16_t4 unpack_u8u16(uint8_t4_packed packedVal); // Non-Sign Extended
// int32_t4 unpack_s8s32(int8_t4_packed packedVal); // Sign Extended
// uint32_t4 unpack_u8u32(uint8_t4_packed packedVal); // Non-Sign Extended
//
// These functions take a 32-bit unsigned integer as input (where each byte of
// the input represents one value, i.e. it's packed). They first unpack the
// 32-bit integer to a vector of 4 bytes. Then for each element of the vec4,
// they zero-extend or sign-extend the byte in order to achieve a 16-bit or
// 32-bit vector of integers.
//
// Note: uint8_t4_packed and int8_t4_packed are NOT vector types! They are
// both scalar 32-bit unsigned integer types where each byte represents one
// value.
// Steps:
// Use OpBitcast to make a vec4 of bytes from a 32-bit value.
// Use OpUConvert/OpSConvert to zero-extend/sign-extend each element of the
// vec4 to 16 or 32 bits.
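  // For example (illustrative): unpack_s8s32 bitcasts the packed 32-bit value
  // to a vector of 4 signed bytes (component 0 taken from the lowest-order
  // byte) and sign-extends each byte to 32 bits with OpSConvert.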
auto *arg = callExpr->getArg(0);
SpirvInstruction *argInstr = doExpr(arg);
const bool isSigned = op == hlsl::IntrinsicOp::IOP_unpack_s8s16 ||
op == hlsl::IntrinsicOp::IOP_unpack_s8s32;
QualType resultType = {};
if (op == hlsl::IntrinsicOp::IOP_unpack_s8s16 ||
op == hlsl::IntrinsicOp::IOP_unpack_u8u16) {
resultType = astContext.getExtVectorType(
isSigned ? astContext.ShortTy : astContext.UnsignedShortTy, 4);
} else {
resultType = astContext.getExtVectorType(
isSigned ? astContext.IntTy : astContext.UnsignedIntTy, 4);
}
if (isSigned) {
QualType v4Int8Type =
astContext.getExtVectorType(astContext.SignedCharTy, 4);
auto *bytesVecInstr =
spvBuilder.createUnaryOp(spv::Op::OpBitcast, v4Int8Type, argInstr, loc);
return spvBuilder.createUnaryOp(spv::Op::OpSConvert, resultType,
bytesVecInstr, loc);
} else {
QualType v4Uint8Type =
astContext.getExtVectorType(astContext.UnsignedCharTy, 4);
auto *bytesVecInstr = spvBuilder.createUnaryOp(spv::Op::OpBitcast,
v4Uint8Type, argInstr, loc);
return spvBuilder.createUnaryOp(spv::Op::OpUConvert, resultType,
bytesVecInstr, loc);
}
}
SpirvInstruction *SpirvEmitter::processRayBuiltins(const CallExpr *callExpr,
hlsl::IntrinsicOp op) {
bool nvRayTracing =
featureManager.isExtensionEnabled(Extension::NV_ray_tracing);
spv::BuiltIn builtin = spv::BuiltIn::Max;
bool transposeMatrix = false;
const auto loc = callExpr->getExprLoc();
const auto range = callExpr->getSourceRange();
switch (op) {
case hlsl::IntrinsicOp::IOP_DispatchRaysDimensions:
builtin = spv::BuiltIn::LaunchSizeNV;
break;
case hlsl::IntrinsicOp::IOP_DispatchRaysIndex:
builtin = spv::BuiltIn::LaunchIdNV;
break;
case hlsl::IntrinsicOp::IOP_RayTCurrent:
if (nvRayTracing)
builtin = spv::BuiltIn::HitTNV;
else
builtin = spv::BuiltIn::RayTmaxKHR;
break;
case hlsl::IntrinsicOp::IOP_RayTMin:
builtin = spv::BuiltIn::RayTminNV;
break;
case hlsl::IntrinsicOp::IOP_HitKind:
builtin = spv::BuiltIn::HitKindNV;
break;
case hlsl::IntrinsicOp::IOP_WorldRayDirection:
builtin = spv::BuiltIn::WorldRayDirectionNV;
break;
case hlsl::IntrinsicOp::IOP_WorldRayOrigin:
builtin = spv::BuiltIn::WorldRayOriginNV;
break;
case hlsl::IntrinsicOp::IOP_ObjectRayDirection:
builtin = spv::BuiltIn::ObjectRayDirectionNV;
break;
case hlsl::IntrinsicOp::IOP_ObjectRayOrigin:
builtin = spv::BuiltIn::ObjectRayOriginNV;
break;
case hlsl::IntrinsicOp::IOP_GeometryIndex:
featureManager.requestExtension(Extension::KHR_ray_tracing,
"GeometryIndex()", loc);
builtin = spv::BuiltIn::RayGeometryIndexKHR;
break;
case hlsl::IntrinsicOp::IOP_InstanceIndex:
builtin = spv::BuiltIn::InstanceId;
break;
case hlsl::IntrinsicOp::IOP_PrimitiveIndex:
builtin = spv::BuiltIn::PrimitiveId;
break;
case hlsl::IntrinsicOp::IOP_InstanceID:
builtin = spv::BuiltIn::InstanceCustomIndexNV;
break;
case hlsl::IntrinsicOp::IOP_RayFlags:
builtin = spv::BuiltIn::IncomingRayFlagsNV;
break;
case hlsl::IntrinsicOp::IOP_ObjectToWorld3x4:
transposeMatrix = true;
LLVM_FALLTHROUGH;
case hlsl::IntrinsicOp::IOP_ObjectToWorld4x3:
builtin = spv::BuiltIn::ObjectToWorldNV;
break;
case hlsl::IntrinsicOp::IOP_WorldToObject3x4:
transposeMatrix = true;
LLVM_FALLTHROUGH;
case hlsl::IntrinsicOp::IOP_WorldToObject4x3:
builtin = spv::BuiltIn::WorldToObjectNV;
break;
default:
emitError("ray intrinsic function unimplemented", loc);
return nullptr;
}
needsLegalization = true;
QualType builtinType = callExpr->getType();
if (transposeMatrix) {
    // DXR defines ObjectToWorld3x4 and WorldToObject3x4 as transposed
    // matrices. SPIR-V only defines the non-transposed variant as a builtin,
    // so read the original non-transposed builtin and then transpose it.
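    // For example (illustrative), `float3x4 m = ObjectToWorld3x4();` loads
    // the 4x3 ObjectToWorldNV builtin and then applies OpTranspose to
    // produce the 3x4 matrix DXR expects.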
assert(hlsl::IsHLSLMatType(builtinType) && "Builtin should be matrix");
const clang::Type *type = builtinType.getCanonicalType().getTypePtr();
const RecordType *RT = cast<RecordType>(type);
const ClassTemplateSpecializationDecl *templateSpecDecl =
cast<ClassTemplateSpecializationDecl>(RT->getDecl());
ClassTemplateDecl *templateDecl =
templateSpecDecl->getSpecializedTemplate();
builtinType = getHLSLMatrixType(astContext, theCompilerInstance.getSema(),
templateDecl, astContext.FloatTy, 4, 3);
}
SpirvInstruction *retVal =
declIdMapper.getBuiltinVar(builtin, builtinType, loc);
retVal = spvBuilder.createLoad(builtinType, retVal, loc, range);
if (transposeMatrix)
retVal = spvBuilder.createUnaryOp(spv::Op::OpTranspose, callExpr->getType(),
retVal, loc, range);
return retVal;
}
SpirvInstruction *SpirvEmitter::processReportHit(const CallExpr *callExpr) {
  if (callExpr->getNumArgs() != 3) {
    emitError("invalid number of arguments to ReportHit",
              callExpr->getExprLoc());
    return nullptr;
  }
// HLSL Function :
// template<typename hitAttr>
// ReportHit(in float, in uint, in hitAttr)
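  // Illustrative usage from an intersection shader (names hypothetical):
  //   MyAttributes attrs = ...;
  //   bool accepted = ReportHit(tHit, /*HitKind*/ 0, attrs);
  // The hit attribute argument is copied into a HitAttributeNV stage
  // variable below before the OpReportIntersection is emitted.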
const Expr *hitAttr = callExpr->getArg(2);
SpirvInstruction *hitAttributeArgInstr =
doExpr(hitAttr, hitAttr->getExprLoc());
QualType hitAttributeType = hitAttr->getType();
// TODO(#6364): Verify that this behavior is correct.
SpirvInstruction *hitAttributeStageVar;
const auto iter = hitAttributeMap.find(hitAttributeType);
if (iter == hitAttributeMap.end()) {
hitAttributeStageVar = declIdMapper.createRayTracingNVStageVar(
spv::StorageClass::HitAttributeNV, hitAttributeType,
hitAttributeArgInstr->getDebugName(), hitAttributeArgInstr->isPrecise(),
hitAttributeArgInstr->isNoninterpolated());
hitAttributeMap[hitAttributeType] = hitAttributeStageVar;
} else {
hitAttributeStageVar = iter->second;
}
// Copy argument to stage variable
spvBuilder.createStore(hitAttributeStageVar, hitAttributeArgInstr,
callExpr->getExprLoc());
// SPIR-V Instruction :
// bool OpReportIntersection(<id> float Hit, <id> uint HitKind)
llvm::SmallVector<SpirvInstruction *, 4> reportHitArgs;
reportHitArgs.push_back(doExpr(callExpr->getArg(0))); // Hit
reportHitArgs.push_back(doExpr(callExpr->getArg(1))); // HitKind
return spvBuilder.createRayTracingOpsNV(spv::Op::OpReportIntersectionNV,
astContext.BoolTy, reportHitArgs,
callExpr->getExprLoc());
}
void SpirvEmitter::processCallShader(const CallExpr *callExpr) {
bool nvRayTracing =
featureManager.isExtensionEnabled(Extension::NV_ray_tracing);
SpirvInstruction *callDataLocInst = nullptr;
SpirvInstruction *callDataStageVar = nullptr;
const VarDecl *callDataArg = nullptr;
QualType callDataType;
const auto args = callExpr->getArgs();
  if (callExpr->getNumArgs() != 2) {
    emitError("invalid number of arguments to CallShader",
              callExpr->getExprLoc());
    return;
  }
// HLSL Func :
// template<typename CallData>
// void CallShader(in int sbtIndex, inout CallData arg)
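  // Illustrative usage (names hypothetical):
  //   MyCallData data = ...;
  //   CallShader(sbtIndex, data);
  // This lowers to a store into a CallableDataNV stage variable, an
  // OpExecuteCallable*, and a load back into `data`, since it is inout.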
if (const auto *implCastExpr = dyn_cast<CastExpr>(args[1])) {
if (const auto *arg = dyn_cast<DeclRefExpr>(implCastExpr->getSubExpr())) {
if (const auto *varDecl = dyn_cast<VarDecl>(arg->getDecl())) {
callDataType = varDecl->getType();
callDataArg = varDecl;
        // Check whether a callable data stage variable of the same type was
        // already created; if so, reuse it.
const auto callDataPair = callDataMap.find(callDataType);
if (callDataPair == callDataMap.end()) {
int numCallDataVars = callDataMap.size();
callDataStageVar = declIdMapper.createRayTracingNVStageVar(
spv::StorageClass::CallableDataNV, varDecl);
// Decorate unique location id for each created stage var
spvBuilder.decorateLocation(callDataStageVar, numCallDataVars);
callDataLocInst = spvBuilder.getConstantInt(
astContext.UnsignedIntTy, llvm::APInt(32, numCallDataVars));
callDataMap[callDataType] =
std::make_pair(callDataStageVar, callDataLocInst);
} else {
callDataStageVar = callDataPair->second.first;
callDataLocInst = callDataPair->second.second;
}
}
}
}
assert(callDataStageVar && callDataArg);
// Copy argument to stage variable
const auto callDataArgInst =
declIdMapper.getDeclEvalInfo(callDataArg, callExpr->getExprLoc());
auto tempLoad = spvBuilder.createLoad(callDataArg->getType(), callDataArgInst,
callDataArg->getLocStart());
spvBuilder.createStore(callDataStageVar, tempLoad, callExpr->getExprLoc());
// SPIR-V Instruction
// void OpExecuteCallable(<id> int SBT Index, <id> uint Callable Data Location
// Id)
llvm::SmallVector<SpirvInstruction *, 2> callShaderArgs;
callShaderArgs.push_back(doExpr(args[0]));
if (nvRayTracing) {
callShaderArgs.push_back(callDataLocInst);
spvBuilder.createRayTracingOpsNV(spv::Op::OpExecuteCallableNV, QualType(),
callShaderArgs, callExpr->getExprLoc());
} else {
callShaderArgs.push_back(callDataStageVar);
spvBuilder.createRayTracingOpsNV(spv::Op::OpExecuteCallableKHR, QualType(),
callShaderArgs, callExpr->getExprLoc());
}
// Copy data back to argument
tempLoad = spvBuilder.createLoad(callDataArg->getType(), callDataStageVar,
callDataArg->getLocStart());
spvBuilder.createStore(callDataArgInst, tempLoad, callExpr->getExprLoc());
return;
}
void SpirvEmitter::processTraceRay(const CallExpr *callExpr) {
bool nvRayTracing =
featureManager.isExtensionEnabled(Extension::NV_ray_tracing);
SpirvInstruction *rayPayloadLocInst = nullptr;
SpirvInstruction *rayPayloadStageVar = nullptr;
const VarDecl *rayPayloadArg = nullptr;
QualType rayPayloadType;
const auto args = callExpr->getArgs();
  if (callExpr->getNumArgs() != 8) {
    emitError("invalid number of arguments to TraceRay",
              callExpr->getExprLoc());
    return;
  }
// HLSL Func
// template<typename RayPayload>
// void TraceRay(RaytracingAccelerationStructure rs,
// uint rayflags,
// uint InstanceInclusionMask
// uint RayContributionToHitGroupIndex,
// uint MultiplierForGeometryContributionToHitGroupIndex,
// uint MissShaderIndex,
// RayDesc ray,
// inout RayPayload p)
// where RayDesc = {float3 origin, float tMin, float3 direction, float tMax}
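  // Illustrative HLSL call (names hypothetical):
  //   RayDesc ray = {origin, 0.01f, direction, 1000.0f};
  //   TraceRay(accelStruct, RAY_FLAG_NONE, 0xFF, 0, 1, 0, ray, payload);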
if (const auto *implCastExpr = dyn_cast<CastExpr>(args[7])) {
if (const auto *arg = dyn_cast<DeclRefExpr>(implCastExpr->getSubExpr())) {
if (const auto *varDecl = dyn_cast<VarDecl>(arg->getDecl())) {
rayPayloadType = varDecl->getType();
rayPayloadArg = varDecl;
const auto rayPayloadPair = rayPayloadMap.find(rayPayloadType);
        // Check whether a rayPayload stage variable of the same type was
        // already created; if so, reuse it.
if (rayPayloadPair == rayPayloadMap.end()) {
int numPayloadVars = rayPayloadMap.size();
rayPayloadStageVar = declIdMapper.createRayTracingNVStageVar(
spv::StorageClass::RayPayloadNV, varDecl);
// Decorate unique location id for each created stage var
spvBuilder.decorateLocation(rayPayloadStageVar, numPayloadVars);
rayPayloadLocInst = spvBuilder.getConstantInt(
astContext.UnsignedIntTy, llvm::APInt(32, numPayloadVars));
rayPayloadMap[rayPayloadType] =
std::make_pair(rayPayloadStageVar, rayPayloadLocInst);
} else {
rayPayloadStageVar = rayPayloadPair->second.first;
rayPayloadLocInst = rayPayloadPair->second.second;
}
}
}
}
assert(rayPayloadStageVar && rayPayloadArg);
const auto floatType = astContext.FloatTy;
const auto vecType = astContext.getExtVectorType(astContext.FloatTy, 3);
// Extract the ray description to match SPIR-V
SpirvInstruction *rayDescArg = doExpr(args[6]);
const auto loc = args[6]->getLocStart();
const auto origin =
spvBuilder.createCompositeExtract(vecType, rayDescArg, {0}, loc);
const auto tMin =
spvBuilder.createCompositeExtract(floatType, rayDescArg, {1}, loc);
const auto direction =
spvBuilder.createCompositeExtract(vecType, rayDescArg, {2}, loc);
const auto tMax =
spvBuilder.createCompositeExtract(floatType, rayDescArg, {3}, loc);
// Copy argument to stage variable
const auto rayPayloadArgInst =
declIdMapper.getDeclEvalInfo(rayPayloadArg, rayPayloadArg->getLocStart());
auto tempLoad =
spvBuilder.createLoad(rayPayloadArg->getType(), rayPayloadArgInst,
rayPayloadArg->getLocStart());
spvBuilder.createStore(rayPayloadStageVar, tempLoad, callExpr->getExprLoc());
// SPIR-V Instruction
// void OpTraceNV ( <id> AccelerationStructureNV acStruct,
// <id> uint Ray Flags,
// <id> uint Cull Mask,
// <id> uint SBT Offset,
// <id> uint SBT Stride,
// <id> uint Miss Index,
// <id> vec4 Ray Origin,
// <id> float Ray Tmin,
// <id> vec3 Ray Direction,
// <id> float Ray Tmax,
// <id> uint RayPayload number)
llvm::SmallVector<SpirvInstruction *, 8> traceArgs;
for (int ii = 0; ii < 6; ii++) {
traceArgs.push_back(doExpr(args[ii]));
}
traceArgs.push_back(origin);
traceArgs.push_back(tMin);
traceArgs.push_back(direction);
traceArgs.push_back(tMax);
if (nvRayTracing) {
traceArgs.push_back(rayPayloadLocInst);
spvBuilder.createRayTracingOpsNV(spv::Op::OpTraceNV, QualType(), traceArgs,
callExpr->getExprLoc());
} else {
traceArgs.push_back(rayPayloadStageVar);
spvBuilder.createRayTracingOpsNV(spv::Op::OpTraceRayKHR, QualType(),
traceArgs, callExpr->getExprLoc());
}
// Copy arguments back to stage variable
tempLoad = spvBuilder.createLoad(rayPayloadArg->getType(), rayPayloadStageVar,
rayPayloadArg->getLocStart());
spvBuilder.createStore(rayPayloadArgInst, tempLoad, callExpr->getExprLoc());
return;
}
void SpirvEmitter::processDispatchMesh(const CallExpr *callExpr) {
// HLSL Func - void DispatchMesh(uint ThreadGroupCountX,
// uint ThreadGroupCountY,
// uint ThreadGroupCountZ,
// groupshared <structType> MeshPayload);
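  // Illustrative call from an amplification shader (payload hypothetical):
  //   DispatchMesh(8, 4, 1, myPayload);
  // On the NV path this stores TaskCountNV = 8 * 4 * 1 = 32; on the EXT path
  // it emits OpEmitMeshTasksEXT with (8, 4, 1).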
assert(callExpr->getNumArgs() == 4);
const auto args = callExpr->getArgs();
const auto loc = callExpr->getExprLoc();
const auto range = callExpr->getSourceRange();
// 1) create a barrier GroupMemoryBarrierWithGroupSync().
processIntrinsicMemoryBarrier(callExpr,
/*isDevice*/ false,
/*groupSync*/ true,
/*isAllBarrier*/ false);
// 2) create PerTaskNV out attribute block and store MeshPayload info.
const auto *sigPoint =
hlsl::SigPoint::GetSigPoint(hlsl::DXIL::SigPointKind::MSOut);
spv::StorageClass sc =
featureManager.isExtensionEnabled(Extension::EXT_mesh_shader)
? spv::StorageClass::TaskPayloadWorkgroupEXT
: spv::StorageClass::Output;
auto *payloadArg = doExpr(args[3]);
bool isValid = false;
const VarDecl *param = nullptr;
if (const auto *implCastExpr = dyn_cast<CastExpr>(args[3])) {
if (const auto *arg = dyn_cast<DeclRefExpr>(implCastExpr->getSubExpr())) {
if (const auto *paramDecl = dyn_cast<VarDecl>(arg->getDecl())) {
if (paramDecl->hasAttr<HLSLGroupSharedAttr>()) {
isValid = declIdMapper.createPayloadStageVars(
sigPoint, sc, paramDecl, /*asInput=*/false, paramDecl->getType(),
"out.var", &payloadArg);
param = paramDecl;
}
}
}
}
if (!isValid) {
emitError("expected groupshared object as argument to DispatchMesh()",
args[3]->getExprLoc());
}
// 3) set up emit dimension.
auto *threadX = doExpr(args[0]);
auto *threadY = doExpr(args[1]);
auto *threadZ = doExpr(args[2]);
if (featureManager.isExtensionEnabled(Extension::EXT_mesh_shader)) {
// for EXT_mesh_shader, create opEmitMeshTasksEXT.
spvBuilder.createEmitMeshTasksEXT(threadX, threadY, threadZ, loc, nullptr,
range);
} else {
// for NV_mesh_shader, set TaskCountNV = threadX * threadY * threadZ.
auto *var = declIdMapper.getBuiltinVar(spv::BuiltIn::TaskCountNV,
astContext.UnsignedIntTy, loc);
auto *taskCount = spvBuilder.createBinaryOp(
spv::Op::OpIMul, astContext.UnsignedIntTy, threadX,
spvBuilder.createBinaryOp(spv::Op::OpIMul, astContext.UnsignedIntTy,
threadY, threadZ, loc, range),
loc, range);
spvBuilder.createStore(var, taskCount, loc, range);
}
}
void SpirvEmitter::processMeshOutputCounts(const CallExpr *callExpr) {
// HLSL Func - void SetMeshOutputCounts(uint numVertices, uint numPrimitives);
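  // NV_mesh_shader has no direct equivalent of SetMeshOutputCounts: on that
  // path only the primitive count (args[1]) is written, to the
  // PrimitiveCountNV builtin; the vertex count argument is unused.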
assert(callExpr->getNumArgs() == 2);
const auto args = callExpr->getArgs();
const auto loc = callExpr->getExprLoc();
const auto range = callExpr->getSourceRange();
if (featureManager.isExtensionEnabled(Extension::EXT_mesh_shader)) {
spvBuilder.createSetMeshOutputsEXT(doExpr(args[0]), doExpr(args[1]), loc,
range);
} else {
auto *var = declIdMapper.getBuiltinVar(spv::BuiltIn::PrimitiveCountNV,
astContext.UnsignedIntTy, loc);
spvBuilder.createStore(var, doExpr(args[1]), loc, range);
}
}
SpirvInstruction *
SpirvEmitter::processGetAttributeAtVertex(const CallExpr *expr) {
if (!spvContext.isPS()) {
emitError("GetAttributeAtVertex only allowed in pixel shader",
expr->getExprLoc());
return nullptr;
}
  // Implicit type conversion should be bound to two things:
  // 1. Function parameters, and recursively, the variable types of called
  //    functions whose declarations were remapped.
  // 2. User-defined structure types, which may be used in function-local
  //    variables (the access chain appends index 0 at the end).
const auto exprLoc = expr->getExprLoc();
const auto exprRange = expr->getSourceRange();
// arg1 : vertexId
auto *arg1BaseExpr = doExpr(expr->getArg(1));
// arg0 : <NoInterpolation> decorated input
  // Tip: for an input with boolean type, we need to ignore the implicit cast
  // first, to match the surrounding workaround cast expression.
auto *arg0NoCast = expr->getArg(0)->IgnoreCasts();
SpirvInstruction *paramDeclInstr = doExpr(arg0NoCast);
  // Hasn't been redeclared before.
QualType elementType = paramDeclInstr->getAstResultType();
if (isBoolOrVecOfBoolType(elementType)) {
emitError("attribute evaluation can only be done "
"on values taken directly from inputs.",
{});
}
// Change to access chain instr
SpirvInstruction *accessChainPtr = paramDeclInstr;
if (isa<SpirvAccessChain>(accessChainPtr)) {
auto *accessInstr = dyn_cast<SpirvAccessChain>(accessChainPtr);
accessInstr->insertIndex(arg1BaseExpr, accessInstr->getIndexes().size());
} else
accessChainPtr = spvBuilder.createAccessChain(
elementType, accessChainPtr, arg1BaseExpr, exprLoc, exprRange);
dyn_cast<SpirvAccessChain>(accessChainPtr)->setNoninterpolated(false);
auto *loadPtr =
spvBuilder.createLoad(elementType, accessChainPtr, exprLoc, exprRange);
// PerVertexKHR Decorator and type redecl will be done in later pervertex
// visitor.
spvBuilder.setPerVertexInterpMode(true);
return loadPtr;
}
SpirvConstant *SpirvEmitter::getValueZero(QualType type) {
{
QualType scalarType = {};
if (isScalarType(type, &scalarType)) {
if (scalarType->isBooleanType()) {
return spvBuilder.getConstantBool(false);
}
if (scalarType->isIntegerType()) {
return spvBuilder.getConstantInt(scalarType, llvm::APInt(32, 0));
}
if (scalarType->isFloatingType()) {
return spvBuilder.getConstantFloat(scalarType, llvm::APFloat(0.0f));
}
}
}
{
QualType elemType = {};
uint32_t size = {};
if (isVectorType(type, &elemType, &size)) {
return getVecValueZero(elemType, size);
}
}
{
QualType elemType = {};
uint32_t rowCount = 0, colCount = 0;
if (isMxNMatrix(type, &elemType, &rowCount, &colCount)) {
auto *row = getVecValueZero(elemType, colCount);
llvm::SmallVector<SpirvConstant *, 4> rows((size_t)rowCount, row);
return spvBuilder.getConstantComposite(type, rows);
}
}
emitError("getting value 0 for type %0 unimplemented", {})
<< type.getAsString();
return nullptr;
}
SpirvConstant *SpirvEmitter::getVecValueZero(QualType elemType, uint32_t size) {
auto *elemZeroId = getValueZero(elemType);
if (size == 1)
return elemZeroId;
llvm::SmallVector<SpirvConstant *, 4> elements(size_t(size), elemZeroId);
const QualType vecType = astContext.getExtVectorType(elemType, size);
return spvBuilder.getConstantComposite(vecType, elements);
}
SpirvConstant *SpirvEmitter::getValueOne(QualType type) {
{
QualType scalarType = {};
if (isScalarType(type, &scalarType)) {
if (scalarType->isBooleanType()) {
return spvBuilder.getConstantBool(true);
}
if (scalarType->isIntegerType()) {
return spvBuilder.getConstantInt(scalarType, llvm::APInt(32, 1));
}
if (scalarType->isFloatingType()) {
return spvBuilder.getConstantFloat(scalarType, llvm::APFloat(1.0f));
}
}
}
{
QualType elemType = {};
uint32_t size = {};
if (isVectorType(type, &elemType, &size)) {
return getVecValueOne(elemType, size);
}
}
emitError("getting value 1 for type %0 unimplemented", {}) << type;
  return nullptr;
}
SpirvConstant *SpirvEmitter::getVecValueOne(QualType elemType, uint32_t size) {
auto *elemOne = getValueOne(elemType);
if (size == 1)
return elemOne;
llvm::SmallVector<SpirvConstant *, 4> elements(size_t(size), elemOne);
const QualType vecType = astContext.getExtVectorType(elemType, size);
return spvBuilder.getConstantComposite(vecType, elements);
}
SpirvConstant *SpirvEmitter::getMatElemValueOne(QualType type) {
assert(hlsl::IsHLSLMatType(type));
const auto elemType = hlsl::GetHLSLMatElementType(type);
uint32_t rowCount = 0, colCount = 0;
hlsl::GetHLSLMatRowColCount(type, rowCount, colCount);
if (rowCount == 1 && colCount == 1)
return getValueOne(elemType);
if (colCount == 1)
return getVecValueOne(elemType, rowCount);
return getVecValueOne(elemType, colCount);
}
SpirvConstant *SpirvEmitter::getMaskForBitwidthValue(QualType type) {
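  // Returns a constant (or constant vector) equal to bitwidth - 1, e.g. 31
  // for 32-bit elements, which is typically used to mask shift amounts so
  // they stay within the element's bit width.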
QualType elemType = {};
uint32_t count = 1;
if (isScalarType(type, &elemType) || isVectorType(type, &elemType, &count)) {
const auto bitwidth = getElementSpirvBitwidth(
astContext, elemType, spirvOptions.enable16BitTypes);
SpirvConstant *mask = spvBuilder.getConstantInt(
elemType,
llvm::APInt(bitwidth, bitwidth - 1, elemType->isSignedIntegerType()));
if (count == 1)
return mask;
const QualType resultType = astContext.getExtVectorType(elemType, count);
llvm::SmallVector<SpirvConstant *, 4> elements(size_t(count), mask);
return spvBuilder.getConstantComposite(resultType, elements);
}
assert(false && "this method only supports scalars and vectors");
return nullptr;
}
hlsl::ShaderModel::Kind SpirvEmitter::getShaderModelKind(StringRef stageName) {
hlsl::ShaderModel::Kind SMK =
llvm::StringSwitch<hlsl::ShaderModel::Kind>(stageName)
.Case("pixel", hlsl::ShaderModel::Kind::Pixel)
.Case("vertex", hlsl::ShaderModel::Kind::Vertex)
.Case("geometry", hlsl::ShaderModel::Kind::Geometry)
.Case("hull", hlsl::ShaderModel::Kind::Hull)
.Case("domain", hlsl::ShaderModel::Kind::Domain)
.Case("compute", hlsl::ShaderModel::Kind::Compute)
.Case("raygeneration", hlsl::ShaderModel::Kind::RayGeneration)
.Case("intersection", hlsl::ShaderModel::Kind::Intersection)
.Case("anyhit", hlsl::ShaderModel::Kind::AnyHit)
.Case("closesthit", hlsl::ShaderModel::Kind::ClosestHit)
.Case("miss", hlsl::ShaderModel::Kind::Miss)
.Case("callable", hlsl::ShaderModel::Kind::Callable)
.Case("mesh", hlsl::ShaderModel::Kind::Mesh)
.Case("amplification", hlsl::ShaderModel::Kind::Amplification)
.Default(hlsl::ShaderModel::Kind::Invalid);
assert(SMK != hlsl::ShaderModel::Kind::Invalid);
return SMK;
}
spv::ExecutionModel
SpirvEmitter::getSpirvShaderStage(hlsl::ShaderModel::Kind smk,
bool extMeshShading) {
switch (smk) {
case hlsl::ShaderModel::Kind::Vertex:
return spv::ExecutionModel::Vertex;
case hlsl::ShaderModel::Kind::Hull:
return spv::ExecutionModel::TessellationControl;
case hlsl::ShaderModel::Kind::Domain:
return spv::ExecutionModel::TessellationEvaluation;
case hlsl::ShaderModel::Kind::Geometry:
return spv::ExecutionModel::Geometry;
case hlsl::ShaderModel::Kind::Pixel:
return spv::ExecutionModel::Fragment;
case hlsl::ShaderModel::Kind::Compute:
return spv::ExecutionModel::GLCompute;
case hlsl::ShaderModel::Kind::RayGeneration:
return spv::ExecutionModel::RayGenerationNV;
case hlsl::ShaderModel::Kind::Intersection:
return spv::ExecutionModel::IntersectionNV;
case hlsl::ShaderModel::Kind::AnyHit:
return spv::ExecutionModel::AnyHitNV;
case hlsl::ShaderModel::Kind::ClosestHit:
return spv::ExecutionModel::ClosestHitNV;
case hlsl::ShaderModel::Kind::Miss:
return spv::ExecutionModel::MissNV;
case hlsl::ShaderModel::Kind::Callable:
return spv::ExecutionModel::CallableNV;
case hlsl::ShaderModel::Kind::Mesh:
return extMeshShading ? spv::ExecutionModel::MeshEXT
: spv::ExecutionModel::MeshNV;
case hlsl::ShaderModel::Kind::Amplification:
return extMeshShading ? spv::ExecutionModel::TaskEXT
: spv::ExecutionModel::TaskNV;
default:
llvm_unreachable("invalid shader model kind");
break;
}
}
void SpirvEmitter::processInlineSpirvAttributes(const FunctionDecl *decl) {
if (!decl->hasAttrs())
return;
for (auto &attr : decl->getAttrs()) {
if (auto *modeAttr = dyn_cast<VKSpvExecutionModeAttr>(attr)) {
spvBuilder.addExecutionMode(
entryFunction, spv::ExecutionMode(modeAttr->getExecutionMode()), {},
modeAttr->getLocation());
}
}
// Handle extension and capability attrs
if (decl->hasAttr<VKExtensionExtAttr>() ||
decl->hasAttr<VKCapabilityExtAttr>()) {
createSpirvIntrInstExt(decl->getAttrs(), QualType(), /* spvArgs */ {},
/* isInst */ false, decl->getLocStart());
}
}
bool SpirvEmitter::processGeometryShaderAttributes(const FunctionDecl *decl,
uint32_t *arraySize) {
bool success = true;
assert(spvContext.isGS());
if (auto *vcAttr = decl->getAttr<HLSLMaxVertexCountAttr>()) {
spvBuilder.addExecutionMode(
entryFunction, spv::ExecutionMode::OutputVertices,
{static_cast<uint32_t>(vcAttr->getCount())}, decl->getLocation());
}
uint32_t invocations = 1;
if (auto *instanceAttr = decl->getAttr<HLSLInstanceAttr>()) {
invocations = static_cast<uint32_t>(instanceAttr->getCount());
}
spvBuilder.addExecutionMode(entryFunction, spv::ExecutionMode::Invocations,
{invocations}, decl->getLocation());
// Only one primitive type is permitted for the geometry shader.
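  // For example (illustrative, struct names hypothetical), the signature
  //   [maxvertexcount(3)]
  //   void main(triangle VSOut input[3], inout TriangleStream<GSOut> stream)
  // yields the Triangles input mode, the OutputTriangleStrip output mode,
  // and an input array size of 3.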
bool outPoint = false, outLine = false, outTriangle = false, inPoint = false,
inLine = false, inTriangle = false, inLineAdj = false,
inTriangleAdj = false;
for (const auto *param : decl->params()) {
    // Add an execution mode based on the output stream type. Do not add an
    // execution mode more than once.
if (param->hasAttr<HLSLInOutAttr>()) {
const auto paramType = param->getType();
if (hlsl::IsHLSLTriangleStreamType(paramType) && !outTriangle) {
spvBuilder.addExecutionMode(entryFunction,
spv::ExecutionMode::OutputTriangleStrip, {},
param->getLocation());
outTriangle = true;
} else if (hlsl::IsHLSLLineStreamType(paramType) && !outLine) {
spvBuilder.addExecutionMode(entryFunction,
spv::ExecutionMode::OutputLineStrip, {},
param->getLocation());
outLine = true;
} else if (hlsl::IsHLSLPointStreamType(paramType) && !outPoint) {
spvBuilder.addExecutionMode(entryFunction,
spv::ExecutionMode::OutputPoints, {},
param->getLocation());
outPoint = true;
}
// An output stream parameter will not have the input primitive type
// attributes, so we can continue to the next parameter.
continue;
}
// Add an execution mode based on the input primitive type. Do not add an
// execution mode more than once.
if (param->hasAttr<HLSLPointAttr>() && !inPoint) {
spvBuilder.addExecutionMode(entryFunction,
spv::ExecutionMode::InputPoints, {},
param->getLocation());
*arraySize = 1;
inPoint = true;
} else if (param->hasAttr<HLSLLineAttr>() && !inLine) {
spvBuilder.addExecutionMode(entryFunction, spv::ExecutionMode::InputLines,
{}, param->getLocation());
*arraySize = 2;
inLine = true;
} else if (param->hasAttr<HLSLTriangleAttr>() && !inTriangle) {
spvBuilder.addExecutionMode(entryFunction, spv::ExecutionMode::Triangles,
{}, param->getLocation());
*arraySize = 3;
inTriangle = true;
} else if (param->hasAttr<HLSLLineAdjAttr>() && !inLineAdj) {
spvBuilder.addExecutionMode(entryFunction,
spv::ExecutionMode::InputLinesAdjacency, {},
param->getLocation());
*arraySize = 4;
inLineAdj = true;
} else if (param->hasAttr<HLSLTriangleAdjAttr>() && !inTriangleAdj) {
spvBuilder.addExecutionMode(entryFunction,
spv::ExecutionMode::InputTrianglesAdjacency,
{}, param->getLocation());
*arraySize = 6;
inTriangleAdj = true;
}
}
if (inPoint + inLine + inLineAdj + inTriangle + inTriangleAdj > 1) {
emitError("only one input primitive type can be specified in the geometry "
"shader",
{});
success = false;
}
if (outPoint + outTriangle + outLine > 1) {
emitError("only one output primitive type can be specified in the geometry "
"shader",
{});
success = false;
}
return success;
}
void SpirvEmitter::processPixelShaderAttributes(const FunctionDecl *decl) {
spvBuilder.addExecutionMode(entryFunction,
spv::ExecutionMode::OriginUpperLeft, {},
decl->getLocation());
if (decl->getAttr<HLSLEarlyDepthStencilAttr>()) {
spvBuilder.addExecutionMode(entryFunction,
spv::ExecutionMode::EarlyFragmentTests, {},
decl->getLocation());
}
if (decl->getAttr<VKPostDepthCoverageAttr>()) {
spvBuilder.addExecutionMode(entryFunction,
spv::ExecutionMode::PostDepthCoverage, {},
decl->getLocation());
}
if (decl->getAttr<VKEarlyAndLateTestsAttr>()) {
spvBuilder.addExecutionMode(
entryFunction, spv::ExecutionMode::EarlyAndLateFragmentTestsAMD, {},
decl->getLocation());
}
if (decl->getAttr<VKDepthUnchangedAttr>()) {
spvBuilder.addExecutionMode(entryFunction,
spv::ExecutionMode::DepthUnchanged, {},
decl->getLocation());
}
// Shaders must not specify more than one of stencil_ref_unchanged_front,
// stencil_ref_greater_equal_front, and stencil_ref_less_equal_front.
// Shaders must not specify more than one of stencil_ref_unchanged_back,
  // stencil_ref_greater_equal_back, and stencil_ref_less_equal_back.
uint32_t stencilFrontAttrCount = 0, stencilBackAttrCount = 0;
if (decl->getAttr<VKStencilRefUnchangedFrontAttr>()) {
++stencilFrontAttrCount;
spvBuilder.addExecutionMode(entryFunction,
spv::ExecutionMode::StencilRefUnchangedFrontAMD,
{}, decl->getLocation());
}
if (decl->getAttr<VKStencilRefGreaterEqualFrontAttr>()) {
++stencilFrontAttrCount;
spvBuilder.addExecutionMode(entryFunction,
spv::ExecutionMode::StencilRefGreaterFrontAMD,
{}, decl->getLocation());
}
if (decl->getAttr<VKStencilRefLessEqualFrontAttr>()) {
++stencilFrontAttrCount;
spvBuilder.addExecutionMode(entryFunction,
spv::ExecutionMode::StencilRefLessFrontAMD, {},
decl->getLocation());
}
if (decl->getAttr<VKStencilRefUnchangedBackAttr>()) {
++stencilBackAttrCount;
spvBuilder.addExecutionMode(entryFunction,
spv::ExecutionMode::StencilRefUnchangedBackAMD,
{}, decl->getLocation());
}
if (decl->getAttr<VKStencilRefGreaterEqualBackAttr>()) {
++stencilBackAttrCount;
spvBuilder.addExecutionMode(entryFunction,
spv::ExecutionMode::StencilRefGreaterBackAMD,
{}, decl->getLocation());
}
if (decl->getAttr<VKStencilRefLessEqualBackAttr>()) {
++stencilBackAttrCount;
spvBuilder.addExecutionMode(entryFunction,
spv::ExecutionMode::StencilRefLessBackAMD, {},
decl->getLocation());
}
if (stencilFrontAttrCount > 1) {
emitError("Shaders must not specify more than one of "
"stencil_ref_unchanged_front, stencil_ref_greater_equal_front, "
"and stencil_ref_less_equal_front.",
{});
}
if (stencilBackAttrCount > 1) {
emitError(
"Shaders must not specify more than one of stencil_ref_unchanged_back, "
"stencil_ref_greater_equal_back, and stencil_ref_less_equal_back.",
{});
}
}
void SpirvEmitter::processComputeShaderAttributes(const FunctionDecl *decl) {
auto *numThreadsAttr = decl->getAttr<HLSLNumThreadsAttr>();
assert(numThreadsAttr && "thread group size missing from entry-point");
uint32_t x = static_cast<uint32_t>(numThreadsAttr->getX());
uint32_t y = static_cast<uint32_t>(numThreadsAttr->getY());
uint32_t z = static_cast<uint32_t>(numThreadsAttr->getZ());
spvBuilder.addExecutionMode(entryFunction, spv::ExecutionMode::LocalSize,
{x, y, z}, decl->getLocation());
auto *waveSizeAttr = decl->getAttr<HLSLWaveSizeAttr>();
if (waveSizeAttr) {
// Not supported in Vulkan SPIR-V, warn and ignore.
// SPIR-V SubgroupSize execution mode would work but it is Kernel only
// (requires the SubgroupDispatch capability, which implies the
// DeviceEnqueue capability, which is Kernel only). Subgroup sizes can be
// specified in Vulkan on the application side via
// VK_EXT_subgroup_size_control.
emitWarning("Wave size is not supported by Vulkan SPIR-V. Consider using "
"VK_EXT_subgroup_size_control.",
waveSizeAttr->getLocation());
}
}
bool SpirvEmitter::processTessellationShaderAttributes(
const FunctionDecl *decl, uint32_t *numOutputControlPoints) {
assert(spvContext.isHS() || spvContext.isDS());
using namespace spv;
if (auto *domain = decl->getAttr<HLSLDomainAttr>()) {
const auto domainType = domain->getDomainType().lower();
const ExecutionMode hsExecMode =
llvm::StringSwitch<ExecutionMode>(domainType)
.Case("tri", ExecutionMode::Triangles)
.Case("quad", ExecutionMode::Quads)
.Case("isoline", ExecutionMode::Isolines)
.Default(ExecutionMode::Max);
if (hsExecMode == ExecutionMode::Max) {
emitError("unknown domain type specified for entry function",
domain->getLocation());
return false;
}
spvBuilder.addExecutionMode(entryFunction, hsExecMode, {},
decl->getLocation());
}
  // Early return for domain shaders, as domain shaders only take the
  // 'domain' attribute.
if (spvContext.isDS())
return true;
if (auto *partitioning = decl->getAttr<HLSLPartitioningAttr>()) {
const auto scheme = partitioning->getScheme().lower();
if (scheme == "pow2") {
emitError("pow2 partitioning scheme is not supported since there is no "
"equivalent in Vulkan",
partitioning->getLocation());
return false;
}
const ExecutionMode hsExecMode =
llvm::StringSwitch<ExecutionMode>(scheme)
.Case("fractional_even", ExecutionMode::SpacingFractionalEven)
.Case("fractional_odd", ExecutionMode::SpacingFractionalOdd)
.Case("integer", ExecutionMode::SpacingEqual)
.Default(ExecutionMode::Max);
if (hsExecMode == ExecutionMode::Max) {
emitError("unknown partitioning scheme in hull shader",
partitioning->getLocation());
return false;
}
spvBuilder.addExecutionMode(entryFunction, hsExecMode, {},
decl->getLocation());
}
if (auto *outputTopology = decl->getAttr<HLSLOutputTopologyAttr>()) {
const auto topology = outputTopology->getTopology().lower();
const ExecutionMode hsExecMode =
llvm::StringSwitch<ExecutionMode>(topology)
.Case("point", ExecutionMode::PointMode)
.Case("triangle_cw", ExecutionMode::VertexOrderCw)
.Case("triangle_ccw", ExecutionMode::VertexOrderCcw)
.Default(ExecutionMode::Max);
// TODO: There is no SPIR-V equivalent for "line" topology. Is it the
// default?
if (topology != "line") {
if (hsExecMode != spv::ExecutionMode::Max) {
spvBuilder.addExecutionMode(entryFunction, hsExecMode, {},
decl->getLocation());
} else {
emitError("unknown output topology in hull shader",
outputTopology->getLocation());
return false;
}
}
}
if (auto *controlPoints = decl->getAttr<HLSLOutputControlPointsAttr>()) {
*numOutputControlPoints = controlPoints->getCount();
spvBuilder.addExecutionMode(entryFunction,
spv::ExecutionMode::OutputVertices,
{*numOutputControlPoints}, decl->getLocation());
}
if (auto *pcf = decl->getAttr<HLSLPatchConstantFuncAttr>()) {
llvm::StringRef pcf_name = pcf->getFunctionName();
for (auto *decl : astContext.getTranslationUnitDecl()->decls())
if (auto *funcDecl = dyn_cast<FunctionDecl>(decl))
if (astContext.IsPatchConstantFunctionDecl(funcDecl) &&
funcDecl->getName() == pcf_name)
patchConstFunc = funcDecl;
}
return true;
}
bool SpirvEmitter::emitEntryFunctionWrapperForRayTracing(
const FunctionDecl *decl, SpirvFunction *entryFuncInstr) {
// The entry basic block.
auto *entryLabel = spvBuilder.createBasicBlock();
spvBuilder.setInsertPoint(entryLabel);
// Initialize all global variables at the beginning of the wrapper
for (const VarDecl *varDecl : toInitGloalVars) {
const auto varInfo =
declIdMapper.getDeclEvalInfo(varDecl, varDecl->getLocation());
if (const auto *init = varDecl->getInit()) {
storeValue(varInfo, loadIfGLValue(init), varDecl->getType(),
init->getLocStart());
// Update counter variable associated with global variables
tryToAssignCounterVar(varDecl, init);
}
// If not explicitly initialized, initialize with their zero values if not
// resource objects
else if (!hlsl::IsHLSLResourceType(varDecl->getType())) {
auto *nullValue = spvBuilder.getConstantNull(varDecl->getType());
spvBuilder.createStore(varInfo, nullValue, varDecl->getLocation());
}
}
// Create temporary variables for holding function call arguments
llvm::SmallVector<SpirvInstruction *, 4> params;
llvm::SmallVector<QualType, 4> paramTypes;
llvm::SmallVector<SpirvInstruction *, 4> stageVars;
hlsl::ShaderModel::Kind sKind = spvContext.getCurrentShaderModelKind();
for (uint32_t i = 0; i < decl->getNumParams(); i++) {
const auto param = decl->getParamDecl(i);
const auto paramType = param->getType();
std::string tempVarName = "param.var." + param->getNameAsString();
auto *tempVar =
spvBuilder.addFnVar(paramType, param->getLocation(), tempVarName,
param->hasAttr<HLSLPreciseAttr>(),
param->hasAttr<HLSLNoInterpolationAttr>());
SpirvVariable *curStageVar = nullptr;
params.push_back(tempVar);
paramTypes.push_back(paramType);
// Order of arguments is fixed
// Any-Hit/Closest-Hit : Arg 0 = rayPayload(inout), Arg1 = attribute(in)
// Miss : Arg 0 = rayPayload(inout)
// Callable : Arg 0 = callable data(inout)
// Raygeneration/Intersection : No Args allowed
    if (sKind == hlsl::ShaderModel::Kind::RayGeneration) {
      // This branch is only reached when arguments are present, which is
      // invalid; the original assert on a string literal was always true.
      assert(false && "raygeneration shaders take no entry function arguments");
    } else if (sKind == hlsl::ShaderModel::Kind::Intersection) {
      assert(false && "intersection shaders take no entry function arguments");
} else if (sKind == hlsl::ShaderModel::Kind::ClosestHit ||
sKind == hlsl::ShaderModel::Kind::AnyHit) {
// Generate rayPayloadInNV and hitAttributeNV stage variables
if (i == 0) {
// First argument is always rayPayload
curStageVar = declIdMapper.createRayTracingNVStageVar(
spv::StorageClass::IncomingRayPayloadNV, param);
currentRayPayload = curStageVar;
} else {
// Second argument is always attribute
curStageVar = declIdMapper.createRayTracingNVStageVar(
spv::StorageClass::HitAttributeNV, param);
}
} else if (sKind == hlsl::ShaderModel::Kind::Miss) {
// Generate rayPayloadInNV stage variable
// First and only argument is rayPayload
curStageVar = declIdMapper.createRayTracingNVStageVar(
spv::StorageClass::IncomingRayPayloadNV, param);
} else if (sKind == hlsl::ShaderModel::Kind::Callable) {
curStageVar = declIdMapper.createRayTracingNVStageVar(
spv::StorageClass::IncomingCallableDataNV, param);
}
if (curStageVar != nullptr) {
stageVars.push_back(curStageVar);
// Copy data to temporary
auto *tempLoadInst =
spvBuilder.createLoad(paramType, curStageVar, param->getLocation());
spvBuilder.createStore(tempVar, tempLoadInst, param->getLocation());
}
}
// Call the original entry function
const QualType retType = decl->getReturnType();
spvBuilder.createFunctionCall(retType, entryFuncInstr, params,
decl->getLocStart());
// Write certain output variables back
if (sKind == hlsl::ShaderModel::Kind::ClosestHit ||
sKind == hlsl::ShaderModel::Kind::AnyHit ||
sKind == hlsl::ShaderModel::Kind::Miss ||
sKind == hlsl::ShaderModel::Kind::Callable) {
// Write back results to IncomingRayPayloadNV/IncomingCallableDataNV
auto *tempLoad = spvBuilder.createLoad(paramTypes[0], params[0],
decl->getBody()->getLocEnd());
spvBuilder.createStore(stageVars[0], tempLoad,
decl->getBody()->getLocEnd());
}
spvBuilder.createReturn(decl->getBody()->getLocEnd());
spvBuilder.endFunction();
return true;
}
bool SpirvEmitter::processMeshOrAmplificationShaderAttributes(
const FunctionDecl *decl, uint32_t *outVerticesArraySize) {
if (auto *numThreadsAttr = decl->getAttr<HLSLNumThreadsAttr>()) {
uint32_t x, y, z;
x = static_cast<uint32_t>(numThreadsAttr->getX());
y = static_cast<uint32_t>(numThreadsAttr->getY());
z = static_cast<uint32_t>(numThreadsAttr->getZ());
spvBuilder.addExecutionMode(entryFunction, spv::ExecutionMode::LocalSize,
{x, y, z}, decl->getLocation());
}
// Early return for amplification shaders as they only take the 'numthreads'
// attribute.
if (spvContext.isAS())
return true;
spv::ExecutionMode outputPrimitive = spv::ExecutionMode::Max;
if (auto *outputTopology = decl->getAttr<HLSLOutputTopologyAttr>()) {
const auto topology = outputTopology->getTopology().lower();
    outputPrimitive =
        llvm::StringSwitch<spv::ExecutionMode>(topology)
            .Case("point", spv::ExecutionMode::OutputPoints)
            .Case("line", spv::ExecutionMode::OutputLinesNV)
            .Case("triangle", spv::ExecutionMode::OutputTrianglesNV)
            .Default(spv::ExecutionMode::Max);
if (outputPrimitive != spv::ExecutionMode::Max) {
spvBuilder.addExecutionMode(entryFunction, outputPrimitive, {},
decl->getLocation());
} else {
emitError("unknown output topology in mesh shader",
outputTopology->getLocation());
return false;
}
}
uint32_t numVertices = 0;
uint32_t numIndices = 0;
uint32_t numPrimitives = 0;
bool payloadDeclSeen = false;
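  // Illustrative mesh shader entry signature (struct names hypothetical):
  //   [outputtopology("triangle")] [numthreads(32, 1, 1)]
  //   void main(out vertices MeshVertex verts[64],
  //             out indices uint3 prims[126],
  //             in payload MeshPayload payload)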
for (uint32_t i = 0; i < decl->getNumParams(); i++) {
const auto param = decl->getParamDecl(i);
const auto paramType = param->getType();
const auto paramLoc = param->getLocation();
if (param->hasAttr<HLSLVerticesAttr>() ||
param->hasAttr<HLSLIndicesAttr>() ||
param->hasAttr<HLSLPrimitivesAttr>()) {
uint32_t arraySize = 0;
if (const auto *arrayType =
astContext.getAsConstantArrayType(paramType)) {
const auto eleType =
arrayType->getElementType()->getCanonicalTypeUnqualified();
if (param->hasAttr<HLSLIndicesAttr>()) {
switch (outputPrimitive) {
case spv::ExecutionMode::OutputPoints:
if (eleType != astContext.UnsignedIntTy) {
emitError("expected 1D array of uint type", paramLoc);
return false;
}
break;
case spv::ExecutionMode::OutputLinesNV: {
QualType baseType;
uint32_t length;
if (!isVectorType(eleType, &baseType, &length) ||
baseType != astContext.UnsignedIntTy || length != 2) {
emitError("expected 1D array of uint2 type", paramLoc);
return false;
}
break;
}
case spv::ExecutionMode::OutputTrianglesNV: {
QualType baseType;
uint32_t length;
if (!isVectorType(eleType, &baseType, &length) ||
baseType != astContext.UnsignedIntTy || length != 3) {
emitError("expected 1D array of uint3 type", paramLoc);
return false;
}
break;
}
default:
assert(false && "unexpected spirv execution mode");
}
} else if (!eleType->isStructureType()) {
// vertices/primitives objects
emitError("expected 1D array of struct type", paramLoc);
return false;
}
arraySize = static_cast<uint32_t>(arrayType->getSize().getZExtValue());
} else {
emitError("expected 1D array of indices/vertices/primitives object",
paramLoc);
return false;
}
if (param->hasAttr<HLSLVerticesAttr>()) {
if (numVertices != 0) {
emitError("only one object with 'vertices' modifier is allowed",
paramLoc);
return false;
}
numVertices = arraySize;
} else if (param->hasAttr<HLSLIndicesAttr>()) {
if (numIndices != 0) {
emitError("only one object with 'indices' modifier is allowed",
paramLoc);
return false;
}
numIndices = arraySize;
} else if (param->hasAttr<HLSLPrimitivesAttr>()) {
if (numPrimitives != 0) {
emitError("only one object with 'primitives' modifier is allowed",
paramLoc);
return false;
}
numPrimitives = arraySize;
}
} else if (param->hasAttr<HLSLPayloadAttr>()) {
if (payloadDeclSeen) {
emitError("only one object with 'payload' modifier is allowed",
paramLoc);
return false;
}
payloadDeclSeen = true;
if (!paramType->isStructureType()) {
emitError("expected payload of struct type", paramLoc);
return false;
}
}
}
// Vertex attribute array is a mandatory param to mesh entry function.
if (numVertices != 0) {
*outVerticesArraySize = numVertices;
spvBuilder.addExecutionMode(
entryFunction, spv::ExecutionMode::OutputVertices,
{static_cast<uint32_t>(numVertices)}, decl->getLocation());
} else {
emitError("expected vertices object declaration", decl->getLocation());
return false;
}
// Vertex indices array is a mandatory param to mesh entry function.
if (numIndices != 0) {
spvBuilder.addExecutionMode(
entryFunction, spv::ExecutionMode::OutputPrimitivesNV,
{static_cast<uint32_t>(numIndices)}, decl->getLocation());
// Primitive attribute array is an optional param to mesh entry function,
// but the array size should match the indices array.
if (numPrimitives != 0 && numPrimitives != numIndices) {
emitError("array size of primitives object should match 'indices' object",
decl->getLocation());
return false;
}
} else {
emitError("expected indices object declaration", decl->getLocation());
return false;
}
return true;
}
bool SpirvEmitter::emitEntryFunctionWrapper(const FunctionDecl *decl,
SpirvFunction *entryFuncInstr) {
// HS specific attributes
uint32_t numOutputControlPoints = 0;
SpirvInstruction *outputControlPointIdVal =
nullptr; // SV_OutputControlPointID value
SpirvInstruction *primitiveIdVar = nullptr; // SV_PrimitiveID variable
SpirvInstruction *viewIdVar = nullptr; // SV_ViewID variable
SpirvInstruction *hullMainInputPatchParam =
nullptr; // Temporary parameter for InputPatch<>
// The array size of per-vertex input/output variables
// Used by HS/DS/GS for the additional arrayness, zero means not an array.
uint32_t inputArraySize = 0;
uint32_t outputArraySize = 0;
  // The wrapper entry function does not have a pre-assigned <result-id>,
  // unlike other functions, which get added to the work queue as function
  // calls are encountered. The wrapper is the entry function.
entryFunction = spvBuilder.beginFunction(
astContext.VoidTy, decl->getLocStart(), decl->getName());
// Specify that entryFunction is an entry function wrapper.
entryFunction->setEntryFunctionWrapper();
// Note this should happen before using declIdMapper for other tasks.
declIdMapper.setEntryFunction(entryFunction);
// Set entryFunction for current entry point.
auto iter = functionInfoMap.find(decl);
assert(iter != functionInfoMap.end());
auto &entryInfo = iter->second;
assert(entryInfo->isEntryFunction);
entryInfo->entryFunction = entryFunction;
if (spvContext.isRay()) {
return emitEntryFunctionWrapperForRayTracing(decl, entryFuncInstr);
}
// Handle attributes specific to each shader stage
if (spvContext.isPS()) {
processPixelShaderAttributes(decl);
} else if (spvContext.isCS()) {
processComputeShaderAttributes(decl);
} else if (spvContext.isHS()) {
if (!processTessellationShaderAttributes(decl, &numOutputControlPoints))
return false;
// The input array size for HS is specified in the InputPatch parameter.
for (const auto *param : decl->params())
if (hlsl::IsHLSLInputPatchType(param->getType())) {
inputArraySize = hlsl::GetHLSLInputPatchCount(param->getType());
break;
}
outputArraySize = numOutputControlPoints;
} else if (spvContext.isDS()) {
if (!processTessellationShaderAttributes(decl, &numOutputControlPoints))
return false;
    // The input array size for DS is specified in the OutputPatch parameter.
for (const auto *param : decl->params())
if (hlsl::IsHLSLOutputPatchType(param->getType())) {
inputArraySize = hlsl::GetHLSLOutputPatchCount(param->getType());
break;
}
// The per-vertex output of DS is not an array.
} else if (spvContext.isGS()) {
if (!processGeometryShaderAttributes(decl, &inputArraySize))
return false;
// The per-vertex output of GS is not an array.
} else if (spvContext.isMS() || spvContext.isAS()) {
if (!processMeshOrAmplificationShaderAttributes(decl, &outputArraySize))
return false;
}
// Go through all parameters and record the declaration of SV_ClipDistance
// and SV_CullDistance. We need to do this extra step because in HLSL we
// can declare multiple SV_ClipDistance/SV_CullDistance variables of float
// or vector of float types, but we can only have one single float array
// for the ClipDistance/CullDistance builtin. So we need to group all
// SV_ClipDistance/SV_CullDistance variables into one float array, thus we
// need to calculate the total size of the array and the offset of each
// variable within that array.
// Also go through all parameters to record the semantic strings provided for
// the builtins in gl_PerVertex.
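  // For example (illustrative): outputs `float2 a : SV_ClipDistance0` and
  // `float b : SV_ClipDistance1` are packed into a single float[3]
  // ClipDistance builtin array, with `a` at offsets 0-1 and `b` at offset 2.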
for (const auto *param : decl->params()) {
if (canActAsInParmVar(param))
if (!declIdMapper.glPerVertex.recordGlPerVertexDeclFacts(param, true))
return false;
if (canActAsOutParmVar(param))
if (!declIdMapper.glPerVertex.recordGlPerVertexDeclFacts(param, false))
return false;
}
// Also consider the SV_ClipDistance/SV_CullDistance in the return type
if (!declIdMapper.glPerVertex.recordGlPerVertexDeclFacts(decl, false))
return false;
// Calculate the total size of the ClipDistance/CullDistance array and the
// offset of SV_ClipDistance/SV_CullDistance variables within the array.
declIdMapper.glPerVertex.calculateClipCullDistanceArraySize();
if (!spvContext.isCS() && !spvContext.isAS()) {
// Generate stand-alone builtins of Position, ClipDistance, and
// CullDistance, which belongs to gl_PerVertex.
declIdMapper.glPerVertex.generateVars(inputArraySize, outputArraySize);
}
// The entry basic block.
auto *entryLabel = spvBuilder.createBasicBlock();
spvBuilder.setInsertPoint(entryLabel);
// Handle vk::execution_mode, vk::ext_extension and vk::ext_capability
// attributes. Uses pseudo-instructions for extensions and capabilities, which
// are added to the beginning of the entry basic block, so must be called
// after the basic block is created and insert point is set.
processInlineSpirvAttributes(decl);
// Initialize all global variables at the beginning of the wrapper
for (const VarDecl *varDecl : toInitGloalVars) {
// SPIR-V does not have string variables
if (isStringType(varDecl->getType()))
continue;
const auto varInfo =
declIdMapper.getDeclEvalInfo(varDecl, varDecl->getLocation());
if (const auto *init = varDecl->getInit()) {
parentMap = std::make_unique<ParentMap>(const_cast<Expr *>(init));
storeValue(varInfo, loadIfGLValue(init), varDecl->getType(),
init->getLocStart());
parentMap.reset(nullptr);
// Update counter variable associated with global variables
tryToAssignCounterVar(varDecl, init);
}
// If not explicitly initialized, initialize with their zero values if not
// resource objects
else if (!hlsl::IsHLSLResourceType(varDecl->getType())) {
auto *nullValue = spvBuilder.getConstantNull(varDecl->getType());
spvBuilder.createStore(varInfo, nullValue, varDecl->getLocation());
}
}
// Create temporary variables for holding function call arguments
llvm::SmallVector<SpirvInstruction *, 4> params;
for (const auto *param : decl->params()) {
const auto paramType = param->getType();
std::string tempVarName = "param.var." + param->getNameAsString();
auto *tempVar =
spvBuilder.addFnVar(paramType, param->getLocation(), tempVarName,
param->hasAttr<HLSLPreciseAttr>(),
param->hasAttr<HLSLNoInterpolationAttr>());
params.push_back(tempVar);
// Create the stage input variable for parameter not marked as pure out and
// initialize the corresponding temporary variable
// Also do not create input variables for output stream objects of geometry
// shaders (e.g. TriangleStream) which are required to be marked as 'inout'.
if (canActAsInParmVar(param)) {
if (spvContext.isHS() && hlsl::IsHLSLInputPatchType(paramType)) {
// Record the temporary variable holding InputPatch. It may be used
// later in the patch constant function.
hullMainInputPatchParam = tempVar;
}
SpirvInstruction *loadedValue = nullptr;
if (!declIdMapper.createStageInputVar(param, &loadedValue, false))
return false;
// Only initialize the temporary variable if the parameter is indeed used,
// or if it is an inout parameter.
if (param->isUsed() || param->hasAttr<HLSLInOutAttr>()) {
spvBuilder.createStore(tempVar, loadedValue, param->getLocation());
        // param is mapped to the stored object, which could be a composite
        // construct or a load.
if (spvContext.isPS())
spvBuilder.addPerVertexStgInputFuncVarEntry(loadedValue, tempVar);
}
// Record the temporary variable holding SV_OutputControlPointID,
// SV_PrimitiveID, and SV_ViewID. It may be used later in the patch
// constant function.
if (hasSemantic(param, hlsl::DXIL::SemanticKind::OutputControlPointID))
outputControlPointIdVal = loadedValue;
else if (hasSemantic(param, hlsl::DXIL::SemanticKind::PrimitiveID))
primitiveIdVar = tempVar;
else if (hasSemantic(param, hlsl::DXIL::SemanticKind::ViewID))
viewIdVar = tempVar;
}
}
// Call the original entry function
const QualType retType = decl->getReturnType();
auto *retVal = spvBuilder.createFunctionCall(retType, entryFuncInstr, params,
decl->getLocStart());
// Create and write stage output variables for return value. Special case for
// Hull shaders since they operate differently in 2 ways:
// 1- Their return value is in fact an array and each invocation should write
// to the proper offset in the array.
// 2- The patch constant function must be called *once* after all invocations
// of the main entry point function is done.
if (spvContext.isHS()) {
// Create stage output variables out of the return type.
if (!declIdMapper.createStageOutputVar(decl, numOutputControlPoints,
outputControlPointIdVal, retVal))
return false;
if (!processHSEntryPointOutputAndPCF(
decl, retType, retVal, numOutputControlPoints,
outputControlPointIdVal, primitiveIdVar, viewIdVar,
hullMainInputPatchParam))
return false;
} else {
if (!declIdMapper.createStageOutputVar(decl, retVal, /*forPCF*/ false))
return false;
}
// Create and write stage output variables for parameters marked as
// out/inout
for (uint32_t i = 0; i < decl->getNumParams(); ++i) {
const auto *param = decl->getParamDecl(i);
if (canActAsOutParmVar(param)) {
// Load the value from the parameter after function call
SpirvInstruction *loadedParam = nullptr;
// No need to write back the value if the parameter is not used at all in
      // the original entry function, unless it is an inout parameter.
//
// Write back of stage output variables in GS is manually controlled by
// .Append() intrinsic method. No need to load the parameter since we
// won't need to write back here.
if ((param->isUsed() || param->hasAttr<HLSLInOutAttr>()) &&
!spvContext.isGS())
loadedParam = spvBuilder.createLoad(param->getType(), params[i],
param->getLocStart());
if (!declIdMapper.createStageOutputVar(param, loadedParam, false))
return false;
}
}
// To prevent spirv-opt from removing all debug info, we emit at least
// a single OpLine to specify the end of the shader. This SourceLocation
// will provide the information.
spvBuilder.createReturn(decl->getLocEnd());
spvBuilder.endFunction();
// For Hull shaders, there is no explicit call to the PCF in the HLSL source.
// We should invoke a translation of the PCF manually.
if (spvContext.isHS())
doDecl(patchConstFunc);
return true;
}
bool SpirvEmitter::processHSEntryPointOutputAndPCF(
const FunctionDecl *hullMainFuncDecl, QualType retType,
SpirvInstruction *retVal, uint32_t numOutputControlPoints,
SpirvInstruction *outputControlPointId, SpirvInstruction *primitiveId,
SpirvInstruction *viewId, SpirvInstruction *hullMainInputPatch) {
// This method may only be called for Hull shaders.
assert(spvContext.isHS());
auto loc = hullMainFuncDecl->getLocation();
auto locEnd = hullMainFuncDecl->getLocEnd();
// For Hull shaders, the real output is an array of size
// numOutputControlPoints. The results of the main should be written to the
// correct offset in the array (based on InvocationID).
if (!numOutputControlPoints) {
emitError("number of output control points cannot be zero", loc);
return false;
}
// TODO: We should be able to handle cases where the SV_OutputControlPointID
// is not provided.
if (!outputControlPointId) {
emitError(
"SV_OutputControlPointID semantic must be provided in hull shader",
loc);
return false;
}
if (!patchConstFunc) {
emitError("patch constant function not defined in hull shader", loc);
return false;
}
// Now create a barrier before calling the Patch Constant Function (PCF).
  // Flags are:
  //   Execution scope  = Workgroup (2)
  //   Memory scope     = Invocation (4)
  //   Memory semantics = None (0)
spvBuilder.createBarrier(spv::Scope::Invocation,
spv::MemorySemanticsMask::MaskNone,
spv::Scope::Workgroup, {});
SpirvInstruction *hullMainOutputPatch = nullptr;
// If the patch constant function (PCF) takes the result of the Hull main
// entry point, create a temporary function-scope variable and write the
// results to it, so it can be passed to the PCF.
if (const ParmVarDecl *outputPatchDecl =
patchConstFuncTakesHullOutputPatch(patchConstFunc)) {
const QualType hullMainRetType = astContext.getConstantArrayType(
retType, llvm::APInt(32, numOutputControlPoints),
clang::ArrayType::Normal, 0);
hullMainOutputPatch =
spvBuilder.addFnVar(hullMainRetType, locEnd, "temp.var.hullMainRetVal");
declIdMapper.copyHullOutStageVarsToOutputPatch(
hullMainOutputPatch, outputPatchDecl, retType, numOutputControlPoints);
}
// The PCF should be called only once. Therefore, we check the invocationID,
// and we only allow ID 0 to call the PCF.
auto *condition = spvBuilder.createBinaryOp(
spv::Op::OpIEqual, astContext.BoolTy, outputControlPointId,
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 0)),
loc);
auto *thenBB = spvBuilder.createBasicBlock("if.true");
auto *mergeBB = spvBuilder.createBasicBlock("if.merge");
spvBuilder.createConditionalBranch(condition, thenBB, mergeBB, loc, mergeBB);
spvBuilder.addSuccessor(thenBB);
spvBuilder.addSuccessor(mergeBB);
spvBuilder.setMergeTarget(mergeBB);
spvBuilder.setInsertPoint(thenBB);
// Call the PCF. Since the function is not explicitly called, we must first
// register an ID for it.
SpirvFunction *pcfId = declIdMapper.getOrRegisterFn(patchConstFunc);
const QualType pcfRetType = patchConstFunc->getReturnType();
std::vector<SpirvInstruction *> pcfParams;
for (const auto *param : patchConstFunc->parameters()) {
// Note: According to the HLSL reference, the PCF takes an InputPatch of
// ControlPoints as well as the PatchID (PrimitiveID). This does not
// necessarily mean that they are present. There is also no requirement
// for the order of parameters passed to PCF.
if (hlsl::IsHLSLInputPatchType(param->getType())) {
pcfParams.push_back(hullMainInputPatch);
} else if (hlsl::IsHLSLOutputPatchType(param->getType())) {
pcfParams.push_back(hullMainOutputPatch);
} else if (hasSemantic(param, hlsl::DXIL::SemanticKind::PrimitiveID)) {
if (!primitiveId) {
primitiveId = createPCFParmVarAndInitFromStageInputVar(param);
}
pcfParams.push_back(primitiveId);
} else if (hasSemantic(param, hlsl::DXIL::SemanticKind::ViewID)) {
if (!viewId) {
viewId = createPCFParmVarAndInitFromStageInputVar(param);
}
pcfParams.push_back(viewId);
} else if (param->hasAttr<HLSLOutAttr>()) {
// Create a temporary function scope variable to pass to the PCF function
// for the output. The value of this variable should be copied to an
// output variable for the param after the function call.
pcfParams.push_back(createFunctionScopeTempFromParameter(param));
} else {
emitError("patch constant function parameter '%0' unknown",
param->getLocation())
<< param->getName();
}
}
auto *pcfResultId = spvBuilder.createFunctionCall(
pcfRetType, pcfId, {pcfParams}, hullMainFuncDecl->getLocStart());
if (!declIdMapper.createStageOutputVar(patchConstFunc, pcfResultId,
/*forPCF*/ true))
return false;
// Traverse all of the parameters for the patch constant function and copy out
// all of the output variables.
for (uint32_t idx = 0; idx < patchConstFunc->parameters().size(); idx++) {
const auto *param = patchConstFunc->parameters()[idx];
if (param->hasAttr<HLSLOutAttr>()) {
SpirvInstruction *pcfParam = pcfParams[idx];
SpirvInstruction *loadedValue = spvBuilder.createLoad(
pcfParam->getAstResultType(), pcfParam, param->getLocation());
declIdMapper.createStageOutputVar(param, loadedValue, /*forPCF*/ true);
}
}
spvBuilder.createBranch(mergeBB, locEnd);
spvBuilder.addSuccessor(mergeBB);
spvBuilder.setInsertPoint(mergeBB);
return true;
}
bool SpirvEmitter::allSwitchCasesAreIntegerLiterals(const Stmt *root) {
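  // A switch can be lowered with OpSwitch only when every case label folds
  // to an integer constant, e.g. `case 3:` or `case 1 + 2:`; otherwise the
  // caller falls back to emitting a chain of if statements.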
if (!root)
return false;
const auto *caseStmt = dyn_cast<CaseStmt>(root);
const auto *compoundStmt = dyn_cast<CompoundStmt>(root);
if (!caseStmt && !compoundStmt)
return true;
if (caseStmt) {
const Expr *caseExpr = caseStmt->getLHS();
return caseExpr && caseExpr->isEvaluatable(astContext);
}
// Recurse down if facing a compound statement.
for (auto *st : compoundStmt->body())
if (!allSwitchCasesAreIntegerLiterals(st))
return false;
return true;
}
void SpirvEmitter::discoverAllCaseStmtInSwitchStmt(
const Stmt *root, SpirvBasicBlock **defaultBB,
std::vector<std::pair<llvm::APInt, SpirvBasicBlock *>> *targets) {
if (!root)
return;
// A switch case can only appear in DefaultStmt, CaseStmt, or
// CompoundStmt. For the rest, we can just return.
const auto *defaultStmt = dyn_cast<DefaultStmt>(root);
const auto *caseStmt = dyn_cast<CaseStmt>(root);
const auto *compoundStmt = dyn_cast<CompoundStmt>(root);
if (!defaultStmt && !caseStmt && !compoundStmt)
return;
// Recurse down if facing a compound statement.
if (compoundStmt) {
for (auto *st : compoundStmt->body())
discoverAllCaseStmtInSwitchStmt(st, defaultBB, targets);
return;
}
std::string caseLabel;
llvm::APInt caseValue;
if (defaultStmt) {
// This is the default branch.
caseLabel = "switch.default";
} else if (caseStmt) {
// This is a non-default case.
    // When using OpSwitch, we only allow integer literal cases, e.g.:
// case <literal_integer>: {...; break;}
const Expr *caseExpr = caseStmt->getLHS();
assert(caseExpr && caseExpr->isEvaluatable(astContext));
Expr::EvalResult evalResult;
caseExpr->EvaluateAsRValue(evalResult, astContext);
caseValue = evalResult.Val.getInt();
const int64_t value = caseValue.getSExtValue();
caseLabel = "switch." + std::string(value < 0 ? "n" : "") +
llvm::itostr(std::abs(value));
}
auto *caseBB = spvBuilder.createBasicBlock(caseLabel);
spvBuilder.addSuccessor(caseBB);
stmtBasicBlock[root] = caseBB;
// Add all cases to the 'targets' vector.
if (caseStmt)
targets->emplace_back(caseValue, caseBB);
// The default label is not part of the 'targets' vector that is passed
// to the OpSwitch instruction.
// If default statement was discovered, return its label via defaultBB.
if (defaultStmt)
*defaultBB = caseBB;
// Process cases nested in other cases. It happens when we have fall through
// cases. For example:
// case 1: case 2: ...; break;
  // will result in the CaseStmt for case 2 nested in the one for case 1.
discoverAllCaseStmtInSwitchStmt(caseStmt ? caseStmt->getSubStmt()
: defaultStmt->getSubStmt(),
defaultBB, targets);
}
void SpirvEmitter::flattenSwitchStmtAST(const Stmt *root,
std::vector<const Stmt *> *flatSwitch) {
const auto *caseStmt = dyn_cast<CaseStmt>(root);
const auto *compoundStmt = dyn_cast<CompoundStmt>(root);
const auto *defaultStmt = dyn_cast<DefaultStmt>(root);
if (!compoundStmt) {
flatSwitch->push_back(root);
}
if (compoundStmt) {
for (const auto *st : compoundStmt->body())
flattenSwitchStmtAST(st, flatSwitch);
} else if (caseStmt) {
flattenSwitchStmtAST(caseStmt->getSubStmt(), flatSwitch);
} else if (defaultStmt) {
flattenSwitchStmtAST(defaultStmt->getSubStmt(), flatSwitch);
}
}
void SpirvEmitter::processCaseStmtOrDefaultStmt(const Stmt *stmt) {
auto *caseStmt = dyn_cast<CaseStmt>(stmt);
auto *defaultStmt = dyn_cast<DefaultStmt>(stmt);
assert(caseStmt || defaultStmt);
auto *caseBB = stmtBasicBlock[stmt];
if (!spvBuilder.isCurrentBasicBlockTerminated()) {
// We are about to handle the case passed in as parameter. If the current
// basic block is not terminated, it means the previous case is a fall
// through case. We need to link it to the case to be processed.
spvBuilder.createBranch(caseBB, stmt->getLocStart());
spvBuilder.addSuccessor(caseBB);
}
spvBuilder.setInsertPoint(caseBB);
doStmt(caseStmt ? caseStmt->getSubStmt() : defaultStmt->getSubStmt());
}
void SpirvEmitter::processSwitchStmtUsingSpirvOpSwitch(
const SwitchStmt *switchStmt) {
const SourceLocation srcLoc = switchStmt->getSwitchLoc();
// First handle the condition variable DeclStmt if one exists.
// For example: handle 'int a = b' in the following:
// switch (int a = b) {...}
if (const auto *condVarDeclStmt = switchStmt->getConditionVariableDeclStmt())
doDeclStmt(condVarDeclStmt);
auto *cond = switchStmt->getCond();
auto *selector = doExpr(cond);
// We need a merge block regardless of the number of switch cases.
// Since OpSwitch always requires a default label, if the switch statement
// does not have a default branch, we use the merge block as the default
// target.
auto *mergeBB = spvBuilder.createBasicBlock("switch.merge");
spvBuilder.setMergeTarget(mergeBB);
breakStack.push(mergeBB);
auto *defaultBB = mergeBB;
// (literal, labelId) pairs to pass to the OpSwitch instruction.
std::vector<std::pair<llvm::APInt, SpirvBasicBlock *>> targets;
discoverAllCaseStmtInSwitchStmt(switchStmt->getBody(), &defaultBB, &targets);
// Create the OpSelectionMerge and OpSwitch.
spvBuilder.createSwitch(mergeBB, selector, defaultBB, targets, srcLoc,
cond->getSourceRange());
// Handle the switch body.
doStmt(switchStmt->getBody());
if (!spvBuilder.isCurrentBasicBlockTerminated())
spvBuilder.createBranch(mergeBB, switchStmt->getLocEnd());
spvBuilder.setInsertPoint(mergeBB);
breakStack.pop();
}
void SpirvEmitter::processSwitchStmtUsingIfStmts(const SwitchStmt *switchStmt) {
std::vector<const Stmt *> flatSwitch;
flattenSwitchStmtAST(switchStmt->getBody(), &flatSwitch);
// First handle the condition variable DeclStmt if one exists.
// For example: handle 'int a = b' in the following:
// switch (int a = b) {...}
if (const auto *condVarDeclStmt = switchStmt->getConditionVariableDeclStmt())
doDeclStmt(condVarDeclStmt);
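  // Overall, as a rough sketch, a switch such as
  //   switch (x) { case 1: a(); break; default: b(); }
  // is lowered to the equivalent of
  //   if (x == 1) { a(); } else { b(); }
  // with each case body accumulated up to its BreakStmt.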
// Figure out the indexes of CaseStmts (and DefaultStmt if it exists) in
// the flattened switch AST.
// For instance, for the following flat vector:
// +-----+-----+-----+-----+-----+-----+-----+-----+-----+-------+-----+
// |Case1|Stmt1|Case2|Stmt2|Break|Case3|Case4|Stmt4|Break|Default|Stmt5|
// +-----+-----+-----+-----+-----+-----+-----+-----+-----+-------+-----+
// The indexes are: {0, 2, 5, 6, 9}
std::vector<uint32_t> caseStmtLocs;
for (uint32_t i = 0; i < flatSwitch.size(); ++i)
if (isa<CaseStmt>(flatSwitch[i]) || isa<DefaultStmt>(flatSwitch[i]))
caseStmtLocs.push_back(i);
IfStmt *prevIfStmt = nullptr;
IfStmt *rootIfStmt = nullptr;
CompoundStmt *defaultBody = nullptr;
// For each case, start at its index in the vector, and go forward
// accumulating statements until BreakStmt or end of vector is reached.
for (auto curCaseIndex : caseStmtLocs) {
const Stmt *curCase = flatSwitch[curCaseIndex];
// CompoundStmt to hold all statements for this case.
CompoundStmt *cs = new (astContext) CompoundStmt(Stmt::EmptyShell());
// Accumulate all non-case/default/break statements as the body for the
// current case.
std::vector<Stmt *> statements;
unsigned i = curCaseIndex + 1;
for (; i < flatSwitch.size() && !isa<BreakStmt>(flatSwitch[i]) &&
!isa<ReturnStmt>(flatSwitch[i]);
++i) {
if (!isa<CaseStmt>(flatSwitch[i]) && !isa<DefaultStmt>(flatSwitch[i]))
statements.push_back(const_cast<Stmt *>(flatSwitch[i]));
}
if (!statements.empty())
cs->setStmts(astContext, statements.data(), statements.size());
SourceLocation mergeLoc =
(i < flatSwitch.size() && isa<BreakStmt>(flatSwitch[i]))
? flatSwitch[i]->getLocStart()
: SourceLocation();
// For non-default cases, generate the IfStmt that compares the switch
// value to the case value.
if (auto *caseStmt = dyn_cast<CaseStmt>(curCase)) {
IfStmt *curIf = new (astContext) IfStmt(Stmt::EmptyShell());
BinaryOperator *bo = new (astContext) BinaryOperator(Stmt::EmptyShell());
bo->setLHS(const_cast<Expr *>(switchStmt->getCond()));
bo->setRHS(const_cast<Expr *>(caseStmt->getLHS()));
bo->setOpcode(BO_EQ);
bo->setType(astContext.getLogicalOperationType());
curIf->setCond(bo);
curIf->setThen(cs);
curIf->setMergeLoc(mergeLoc);
curIf->setIfLoc(prevIfStmt ? SourceLocation() : caseStmt->getCaseLoc());
// No conditional variable associated with this faux if statement.
curIf->setConditionVariable(astContext, nullptr);
// Each If statement is the "else" of the previous if statement.
if (prevIfStmt) {
prevIfStmt->setElse(curIf);
prevIfStmt->setElseLoc(caseStmt->getCaseLoc());
} else
rootIfStmt = curIf;
prevIfStmt = curIf;
} else {
// Record the DefaultStmt body as it will be used as the body of the
// "else" block in the if-elseif-...-else pattern.
defaultBody = cs;
}
}
// If a default case exists, it is the "else" of the last if statement.
if (prevIfStmt)
prevIfStmt->setElse(defaultBody);
// Since all else-if and else statements are the child nodes of the first
// IfStmt, we only need to call doStmt for the first IfStmt.
if (rootIfStmt)
doStmt(rootIfStmt);
// If there are no CaseStmt and there is only 1 DefaultStmt, there will be
// no if statements. The switch in that case only executes the body of the
// default case.
else if (defaultBody)
doStmt(defaultBody);
}
SpirvInstruction *SpirvEmitter::extractVecFromVec4(SpirvInstruction *from,
uint32_t targetVecSize,
QualType targetElemType,
SourceLocation loc,
SourceRange range) {
assert(targetVecSize > 0 && targetVecSize < 5);
const QualType retType =
targetVecSize == 1
? targetElemType
: astContext.getExtVectorType(targetElemType, targetVecSize);
  switch (targetVecSize) {
  case 1:
    return spvBuilder.createCompositeExtract(retType, from, {0}, loc, range);
  case 2:
    return spvBuilder.createVectorShuffle(retType, from, from, {0, 1}, loc,
                                          range);
  case 3:
    return spvBuilder.createVectorShuffle(retType, from, from, {0, 1, 2}, loc,
                                          range);
  case 4:
    return from;
  default:
    llvm_unreachable("vector element count must be 1, 2, 3, or 4");
  }
}
void SpirvEmitter::addFunctionToWorkQueue(hlsl::DXIL::ShaderKind shaderKind,
const clang::FunctionDecl *fnDecl,
bool isEntryFunction) {
// Only update the workQueue and the function info map if the given
// FunctionDecl hasn't been added already.
if (functionInfoMap.find(fnDecl) == functionInfoMap.end()) {
// Note: The function is just discovered and is being added to the
// workQueue, therefore it does not have the entryFunction SPIR-V
// instruction yet (use nullptr).
auto *fnInfo = new (spvContext) FunctionInfo(
shaderKind, fnDecl, /*entryFunction*/ nullptr, isEntryFunction);
functionInfoMap[fnDecl] = fnInfo;
workQueue.push_back(fnInfo);
}
}
SpirvInstruction *
SpirvEmitter::processTraceRayInline(const CXXMemberCallExpr *expr) {
const auto object = expr->getImplicitObjectArgument();
uint32_t templateFlags = hlsl::GetHLSLResourceTemplateUInt(object->getType());
const auto constFlags = spvBuilder.getConstantInt(
astContext.UnsignedIntTy, llvm::APInt(32, templateFlags));
SpirvInstruction *rayqueryObj = loadIfAliasVarRef(object);
const auto args = expr->getArgs();
  if (expr->getNumArgs() != 4) {
    emitError("invalid number of arguments to RayQueryInitialize",
              expr->getExprLoc());
    return nullptr;
  }
// HLSL Func
// void RayQuery::TraceRayInline(
// RaytracingAccelerationStructure AccelerationStructure,
// uint RayFlags,
// uint InstanceInclusionMask,
// RayDesc Ray);
// void OpRayQueryInitializeKHR ( <id> RayQuery,
// <id> Acceleration Structure
// <id> RayFlags
// <id> CullMask
// <id> RayOrigin
// <id> RayTmin
// <id> RayDirection
// <id> Ray Tmax)
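  // The RayDesc argument is a struct { float3 Origin; float TMin;
  // float3 Direction; float TMax; }, so its four fields are extracted below
  // to form the separate SPIR-V operands.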
const auto accelStructure = doExpr(args[0]);
SpirvInstruction *rayFlags = nullptr;
if ((rayFlags =
constEvaluator.tryToEvaluateAsConst(args[1], isSpecConstantMode))) {
rayFlags->setRValue();
} else {
rayFlags = doExpr(args[1]);
}
if (auto constFlags = dyn_cast<SpirvConstantInteger>(rayFlags)) {
auto interRayFlags = constFlags->getValue().getZExtValue();
templateFlags |= interRayFlags;
}
bool hasCullFlags =
templateFlags & (uint32_t(hlsl::DXIL::RayFlag::SkipTriangles) |
uint32_t(hlsl::DXIL::RayFlag::SkipProceduralPrimitives));
auto loc = args[1]->getLocStart();
rayFlags =
spvBuilder.createBinaryOp(spv::Op::OpBitwiseOr, astContext.UnsignedIntTy,
constFlags, rayFlags, loc);
const auto cullMask = doExpr(args[2]);
// Extract the ray description to match SPIR-V
const auto floatType = astContext.FloatTy;
const auto vecType = astContext.getExtVectorType(astContext.FloatTy, 3);
SpirvInstruction *rayDescArg = doExpr(args[3]);
loc = args[3]->getLocStart();
const auto origin =
spvBuilder.createCompositeExtract(vecType, rayDescArg, {0}, loc);
const auto tMin =
spvBuilder.createCompositeExtract(floatType, rayDescArg, {1}, loc);
const auto direction =
spvBuilder.createCompositeExtract(vecType, rayDescArg, {2}, loc);
const auto tMax =
spvBuilder.createCompositeExtract(floatType, rayDescArg, {3}, loc);
llvm::SmallVector<SpirvInstruction *, 8> traceArgs = {
rayqueryObj, accelStructure, rayFlags, cullMask,
origin, tMin, direction, tMax};
return spvBuilder.createRayQueryOpsKHR(spv::Op::OpRayQueryInitializeKHR,
QualType(), traceArgs, hasCullFlags,
expr->getExprLoc());
}
SpirvInstruction *
SpirvEmitter::processRayQueryIntrinsics(const CXXMemberCallExpr *expr,
hlsl::IntrinsicOp opcode) {
const auto object = expr->getImplicitObjectArgument();
SpirvInstruction *rayqueryObj = loadIfAliasVarRef(object);
const auto args = expr->getArgs();
llvm::SmallVector<SpirvInstruction *, 8> traceArgs;
traceArgs.push_back(rayqueryObj);
for (uint32_t i = 0; i < expr->getNumArgs(); ++i) {
traceArgs.push_back(doExpr(args[i]));
}
spv::Op spvCode = spv::Op::Max;
QualType exprType = expr->getType();
exprType = exprType->isVoidType() ? QualType() : exprType;
const auto candidateIntersection =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 0));
const auto committedIntersection =
spvBuilder.getConstantInt(astContext.UnsignedIntTy, llvm::APInt(32, 1));
bool transposeMatrix = false;
bool logicalNot = false;
using namespace hlsl;
switch (opcode) {
case IntrinsicOp::MOP_Proceed:
spvCode = spv::Op::OpRayQueryProceedKHR;
break;
case IntrinsicOp::MOP_Abort:
spvCode = spv::Op::OpRayQueryTerminateKHR;
exprType = QualType();
break;
case IntrinsicOp::MOP_CandidateGeometryIndex:
traceArgs.push_back(candidateIntersection);
spvCode = spv::Op::OpRayQueryGetIntersectionGeometryIndexKHR;
break;
case IntrinsicOp::MOP_CandidateInstanceContributionToHitGroupIndex:
traceArgs.push_back(candidateIntersection);
spvCode = spv::Op::
OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR;
break;
case IntrinsicOp::MOP_CandidateInstanceID:
traceArgs.push_back(candidateIntersection);
spvCode = spv::Op::OpRayQueryGetIntersectionInstanceCustomIndexKHR;
break;
case IntrinsicOp::MOP_CandidateInstanceIndex:
traceArgs.push_back(candidateIntersection);
spvCode = spv::Op::OpRayQueryGetIntersectionInstanceIdKHR;
break;
case IntrinsicOp::MOP_CandidateObjectRayDirection:
traceArgs.push_back(candidateIntersection);
spvCode = spv::Op::OpRayQueryGetIntersectionObjectRayDirectionKHR;
break;
case IntrinsicOp::MOP_CandidateObjectRayOrigin:
traceArgs.push_back(candidateIntersection);
spvCode = spv::Op::OpRayQueryGetIntersectionObjectRayOriginKHR;
break;
case IntrinsicOp::MOP_CandidateObjectToWorld3x4:
spvCode = spv::Op::OpRayQueryGetIntersectionObjectToWorldKHR;
traceArgs.push_back(candidateIntersection);
transposeMatrix = true;
break;
case IntrinsicOp::MOP_CandidateObjectToWorld4x3:
spvCode = spv::Op::OpRayQueryGetIntersectionObjectToWorldKHR;
traceArgs.push_back(candidateIntersection);
break;
case IntrinsicOp::MOP_CandidatePrimitiveIndex:
traceArgs.push_back(candidateIntersection);
spvCode = spv::Op::OpRayQueryGetIntersectionPrimitiveIndexKHR;
break;
case IntrinsicOp::MOP_CandidateProceduralPrimitiveNonOpaque:
spvCode = spv::Op::OpRayQueryGetIntersectionCandidateAABBOpaqueKHR;
logicalNot = true;
break;
case IntrinsicOp::MOP_CandidateTriangleBarycentrics:
traceArgs.push_back(candidateIntersection);
spvCode = spv::Op::OpRayQueryGetIntersectionBarycentricsKHR;
break;
case IntrinsicOp::MOP_CandidateTriangleFrontFace:
traceArgs.push_back(candidateIntersection);
spvCode = spv::Op::OpRayQueryGetIntersectionFrontFaceKHR;
break;
case IntrinsicOp::MOP_CandidateTriangleRayT:
traceArgs.push_back(candidateIntersection);
spvCode = spv::Op::OpRayQueryGetIntersectionTKHR;
break;
case IntrinsicOp::MOP_CandidateType:
spvCode = spv::Op::OpRayQueryGetIntersectionTypeKHR;
traceArgs.push_back(candidateIntersection);
break;
case IntrinsicOp::MOP_CandidateWorldToObject4x3:
spvCode = spv::Op::OpRayQueryGetIntersectionWorldToObjectKHR;
traceArgs.push_back(candidateIntersection);
break;
case IntrinsicOp::MOP_CandidateWorldToObject3x4:
spvCode = spv::Op::OpRayQueryGetIntersectionWorldToObjectKHR;
traceArgs.push_back(candidateIntersection);
transposeMatrix = true;
break;
case IntrinsicOp::MOP_CommitNonOpaqueTriangleHit:
spvCode = spv::Op::OpRayQueryConfirmIntersectionKHR;
exprType = QualType();
break;
case IntrinsicOp::MOP_CommitProceduralPrimitiveHit:
spvCode = spv::Op::OpRayQueryGenerateIntersectionKHR;
exprType = QualType();
break;
case IntrinsicOp::MOP_CommittedGeometryIndex:
spvCode = spv::Op::OpRayQueryGetIntersectionGeometryIndexKHR;
traceArgs.push_back(committedIntersection);
break;
case IntrinsicOp::MOP_CommittedInstanceContributionToHitGroupIndex:
spvCode = spv::Op::
OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR;
traceArgs.push_back(committedIntersection);
break;
case IntrinsicOp::MOP_CommittedInstanceID:
spvCode = spv::Op::OpRayQueryGetIntersectionInstanceCustomIndexKHR;
traceArgs.push_back(committedIntersection);
break;
case IntrinsicOp::MOP_CommittedInstanceIndex:
spvCode = spv::Op::OpRayQueryGetIntersectionInstanceIdKHR;
traceArgs.push_back(committedIntersection);
break;
case IntrinsicOp::MOP_CommittedObjectRayDirection:
spvCode = spv::Op::OpRayQueryGetIntersectionObjectRayDirectionKHR;
traceArgs.push_back(committedIntersection);
break;
case IntrinsicOp::MOP_CommittedObjectRayOrigin:
spvCode = spv::Op::OpRayQueryGetIntersectionObjectRayOriginKHR;
traceArgs.push_back(committedIntersection);
break;
case IntrinsicOp::MOP_CommittedObjectToWorld3x4:
spvCode = spv::Op::OpRayQueryGetIntersectionObjectToWorldKHR;
traceArgs.push_back(committedIntersection);
transposeMatrix = true;
break;
case IntrinsicOp::MOP_CommittedObjectToWorld4x3:
spvCode = spv::Op::OpRayQueryGetIntersectionObjectToWorldKHR;
traceArgs.push_back(committedIntersection);
break;
case IntrinsicOp::MOP_CommittedPrimitiveIndex:
spvCode = spv::Op::OpRayQueryGetIntersectionPrimitiveIndexKHR;
traceArgs.push_back(committedIntersection);
break;
case IntrinsicOp::MOP_CommittedRayT:
spvCode = spv::Op::OpRayQueryGetIntersectionTKHR;
traceArgs.push_back(committedIntersection);
break;
case IntrinsicOp::MOP_CommittedStatus:
spvCode = spv::Op::OpRayQueryGetIntersectionTypeKHR;
traceArgs.push_back(committedIntersection);
break;
case IntrinsicOp::MOP_CommittedTriangleBarycentrics:
spvCode = spv::Op::OpRayQueryGetIntersectionBarycentricsKHR;
traceArgs.push_back(committedIntersection);
break;
case IntrinsicOp::MOP_CommittedTriangleFrontFace:
spvCode = spv::Op::OpRayQueryGetIntersectionFrontFaceKHR;
traceArgs.push_back(committedIntersection);
break;
case IntrinsicOp::MOP_CommittedWorldToObject3x4:
spvCode = spv::Op::OpRayQueryGetIntersectionWorldToObjectKHR;
traceArgs.push_back(committedIntersection);
transposeMatrix = true;
break;
case IntrinsicOp::MOP_CommittedWorldToObject4x3:
spvCode = spv::Op::OpRayQueryGetIntersectionWorldToObjectKHR;
traceArgs.push_back(committedIntersection);
break;
case IntrinsicOp::MOP_RayFlags:
spvCode = spv::Op::OpRayQueryGetRayFlagsKHR;
break;
case IntrinsicOp::MOP_RayTMin:
spvCode = spv::Op::OpRayQueryGetRayTMinKHR;
break;
case IntrinsicOp::MOP_WorldRayDirection:
spvCode = spv::Op::OpRayQueryGetWorldRayDirectionKHR;
break;
case IntrinsicOp::MOP_WorldRayOrigin:
spvCode = spv::Op::OpRayQueryGetWorldRayOriginKHR;
break;
default:
emitError("intrinsic '%0' method unimplemented",
expr->getCallee()->getExprLoc())
<< getFunctionOrOperatorName(expr->getDirectCallee(), true);
return nullptr;
}
if (transposeMatrix) {
assert(hlsl::IsHLSLMatType(exprType) && "intrinsic should be matrix");
const clang::Type *type = exprType.getCanonicalType().getTypePtr();
const RecordType *RT = cast<RecordType>(type);
const ClassTemplateSpecializationDecl *templateSpecDecl =
cast<ClassTemplateSpecializationDecl>(RT->getDecl());
ClassTemplateDecl *templateDecl =
templateSpecDecl->getSpecializedTemplate();
exprType = getHLSLMatrixType(astContext, theCompilerInstance.getSema(),
templateDecl, astContext.FloatTy, 4, 3);
}
const auto loc = expr->getExprLoc();
const auto range = expr->getSourceRange();
SpirvInstruction *retVal = spvBuilder.createRayQueryOpsKHR(
spvCode, exprType, traceArgs, false, loc, range);
if (transposeMatrix) {
retVal = spvBuilder.createUnaryOp(spv::Op::OpTranspose, expr->getType(),
retVal, loc, range);
}
if (logicalNot) {
retVal = spvBuilder.createUnaryOp(spv::Op::OpLogicalNot, expr->getType(),
retVal, loc, range);
}
retVal->setRValue();
return retVal;
}
SpirvInstruction *
SpirvEmitter::createSpirvIntrInstExt(llvm::ArrayRef<const Attr *> attrs,
QualType retType,
llvm::ArrayRef<SpirvInstruction *> spvArgs,
bool isInstr, SourceLocation loc) {
llvm::SmallVector<uint32_t, 2> capabilities;
llvm::SmallVector<llvm::StringRef, 2> extensions;
llvm::StringRef instSet = "";
  // For [[vk::ext_type_def]], we use a dummy OpNop with no semantic meaning,
  // with possible extensions and capabilities.
uint32_t op = static_cast<unsigned>(spv::Op::OpNop);
for (auto &attr : attrs) {
if (auto capAttr = dyn_cast<VKCapabilityExtAttr>(attr)) {
capabilities.push_back(capAttr->getCapability());
} else if (auto extAttr = dyn_cast<VKExtensionExtAttr>(attr)) {
extensions.push_back(extAttr->getName());
}
if (!isInstr)
continue;
if (auto instAttr = dyn_cast<VKInstructionExtAttr>(attr)) {
op = instAttr->getOpcode();
instSet = instAttr->getInstruction_set();
}
}
SpirvInstruction *retVal = spvBuilder.createSpirvIntrInstExt(
op, retType, spvArgs, extensions, instSet, capabilities, loc);
if (!retVal)
return nullptr;
  // TODO: Revisit this r-value setting when handling vk::ext_result_id<T>.
retVal->setRValue();
return retVal;
}
SpirvInstruction *SpirvEmitter::invertYIfRequested(SpirvInstruction *position,
SourceLocation loc,
SourceRange range) {
// Negate SV_Position.y if requested
if (spirvOptions.invertY) {
const auto oldY = spvBuilder.createCompositeExtract(
astContext.FloatTy, position, {1}, loc, range);
const auto newY = spvBuilder.createUnaryOp(
spv::Op::OpFNegate, astContext.FloatTy, oldY, loc, range);
position = spvBuilder.createCompositeInsert(
astContext.getExtVectorType(astContext.FloatTy, 4), position, {1}, newY,
loc, range);
}
return position;
}
SpirvInstruction *
SpirvEmitter::processSpvIntrinsicCallExpr(const CallExpr *expr) {
const auto *funcDecl = expr->getDirectCallee();
llvm::SmallVector<SpirvInstruction *, 8> spvArgs;
const auto args = expr->getArgs();
for (uint32_t i = 0; i < expr->getNumArgs(); ++i) {
const auto *param = funcDecl->getParamDecl(i);
const Expr *arg = args[i]->IgnoreParenLValueCasts();
SpirvInstruction *argInst = doExpr(arg);
if (param->hasAttr<VKReferenceExtAttr>()) {
if (argInst->isRValue()) {
emitError("argument for a parameter with vk::ext_reference attribute "
"must be a reference",
arg->getExprLoc());
return nullptr;
}
spvArgs.push_back(argInst);
} else if (param->hasAttr<VKLiteralExtAttr>()) {
auto constArg = dyn_cast<SpirvConstant>(argInst);
if (constArg == nullptr) {
constArg = constEvaluator.tryToEvaluateAsConst(arg, isSpecConstantMode);
}
if (constArg == nullptr) {
emitError("vk::ext_literal may only be applied to parameters that can "
"be evaluated to a literal value",
expr->getExprLoc());
return nullptr;
}
constArg->setLiteral();
spvArgs.push_back(constArg);
} else {
spvArgs.push_back(loadIfGLValue(arg, argInst));
}
}
return createSpirvIntrInstExt(funcDecl->getAttrs(), funcDecl->getReturnType(),
spvArgs,
/*isInstr*/ true, expr->getExprLoc());
}
uint32_t SpirvEmitter::getRawBufferAlignment(const Expr *expr) {
llvm::APSInt value;
if (expr->EvaluateAsInt(value, astContext) && value.isNonNegative()) {
return static_cast<uint32_t>(value.getZExtValue());
}
// Unable to determine a valid alignment at compile time
emitError("alignment argument must be a constant unsigned integer",
expr->getExprLoc());
return 0;
}
SpirvInstruction *SpirvEmitter::processRawBufferLoad(const CallExpr *callExpr) {
if (callExpr->getNumArgs() > 2) {
emitError("number of arguments for vk::RawBufferLoad() must be 1 or 2",
callExpr->getExprLoc());
return nullptr;
}
uint32_t alignment = callExpr->getNumArgs() == 1
? 4
: getRawBufferAlignment(callExpr->getArg(1));
if (alignment == 0)
return nullptr;
SpirvInstruction *address = doExpr(callExpr->getArg(0));
QualType bufferType = callExpr->getCallReturnType(astContext);
SourceLocation loc = callExpr->getExprLoc();
if (!isBoolOrVecMatOfBoolType(bufferType)) {
return loadDataFromRawAddress(address, bufferType, alignment, loc);
}
// If callExpr is `vk::RawBufferLoad<bool>(..)`, we have to load 'uint' and
// convert it to boolean data, because a physical pointer cannot have boolean
// type in Vulkan.
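  // As a rough sketch (result IDs are illustrative), `vk::RawBufferLoad<bool>`
  // is emitted as:
  //   %ptr = OpBitcast %_ptr_PhysicalStorageBuffer_uint %addr
  //   %u   = OpLoad %uint %ptr Aligned 4
  //   %b   = OpINotEqual %bool %u %uint_0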
if (alignment % 4 != 0) {
emitWarning("Since boolean is a logical type, we use a unsigned integer "
"type to read/write boolean from a buffer. Therefore "
"alignment for the data with a boolean type must be aligned "
"with 4 bytes",
loc);
}
QualType boolType = bufferType;
bufferType = getUintTypeForBool(astContext, theCompilerInstance, boolType);
SpirvInstruction *load =
loadDataFromRawAddress(address, bufferType, alignment, loc);
auto *loadAsBool = castToBool(load, bufferType, boolType, loc);
if (!loadAsBool)
return nullptr;
loadAsBool->setRValue();
return loadAsBool;
}
SpirvInstruction *
SpirvEmitter::loadDataFromRawAddress(SpirvInstruction *addressInUInt64,
QualType bufferType, uint32_t alignment,
SourceLocation loc) {
// Summary:
// %address = OpBitcast %ptrTobufferType %addressInUInt64
  // %loadInst = OpLoad %bufferType %address Aligned %alignment
const HybridPointerType *bufferPtrType =
spvBuilder.getPhysicalStorageBufferType(bufferType);
SpirvUnaryOp *address = spvBuilder.createUnaryOp(
spv::Op::OpBitcast, bufferPtrType, addressInUInt64, loc);
address->setStorageClass(spv::StorageClass::PhysicalStorageBuffer);
address->setLayoutRule(spirvOptions.sBufferLayoutRule);
SpirvLoad *loadInst =
dyn_cast<SpirvLoad>(spvBuilder.createLoad(bufferType, address, loc));
assert(loadInst);
loadInst->setAlignment(alignment);
loadInst->setRValue();
return loadInst;
}
SpirvInstruction *
SpirvEmitter::storeDataToRawAddress(SpirvInstruction *addressInUInt64,
SpirvInstruction *value,
QualType bufferType, uint32_t alignment,
SourceLocation loc, SourceRange range) {
// Summary:
// %address = OpBitcast %ptrTobufferType %addressInUInt64
  //            OpStore %address %value Aligned %alignment
if (!value || !addressInUInt64)
return nullptr;
const HybridPointerType *bufferPtrType =
spvBuilder.getPhysicalStorageBufferType(bufferType);
SpirvUnaryOp *address = spvBuilder.createUnaryOp(
spv::Op::OpBitcast, bufferPtrType, addressInUInt64, loc);
if (!address)
return nullptr;
address->setStorageClass(spv::StorageClass::PhysicalStorageBuffer);
address->setLayoutRule(spirvOptions.sBufferLayoutRule);
// If the source value has a different layout, it is not safe to directly
// store it. It needs to be component-wise reconstructed to the new layout.
SpirvInstruction *source = value;
if (value->getStorageClass() != address->getStorageClass()) {
source = reconstructValue(value, bufferType, address->getLayoutRule(), loc,
range);
}
if (!source)
return nullptr;
SpirvStore *storeInst = spvBuilder.createStore(address, source, loc);
storeInst->setAlignment(alignment);
storeInst->setStorageClass(spv::StorageClass::PhysicalStorageBuffer);
return nullptr;
}
SpirvInstruction *
SpirvEmitter::processRawBufferStore(const CallExpr *callExpr) {
if (callExpr->getNumArgs() != 2 && callExpr->getNumArgs() != 3) {
emitError("number of arguments for vk::RawBufferStore() must be 2 or 3",
callExpr->getExprLoc());
return nullptr;
}
uint32_t alignment = callExpr->getNumArgs() == 2
? 4
: getRawBufferAlignment(callExpr->getArg(2));
if (alignment == 0)
return nullptr;
SpirvInstruction *address = doExpr(callExpr->getArg(0));
SpirvInstruction *value = doExpr(callExpr->getArg(1));
if (!address || !value)
return nullptr;
QualType bufferType = value->getAstResultType();
clang::SourceLocation loc = callExpr->getExprLoc();
if (!isBoolOrVecMatOfBoolType(bufferType)) {
return storeDataToRawAddress(address, value, bufferType, alignment, loc,
callExpr->getLocStart());
}
  // If callExpr is `vk::RawBufferStore<bool>(..)`, we have to convert the
  // boolean data to 'uint' and store that, because a physical pointer cannot
  // have boolean type in Vulkan.
if (alignment % 4 != 0) {
emitWarning("Since boolean is a logical type, we use a unsigned integer "
"type to read/write boolean from a buffer. Therefore "
"alignment for the data with a boolean type must be aligned "
"with 4 bytes",
loc);
}
QualType boolType = bufferType;
bufferType = getUintTypeForBool(astContext, theCompilerInstance, boolType);
auto *storeAsInt = castToInt(value, boolType, bufferType, loc);
return storeDataToRawAddress(address, storeAsInt, bufferType, alignment, loc,
callExpr->getLocStart());
}
SpirvInstruction *
SpirvEmitter::processIntrinsicExecutionMode(const CallExpr *expr,
bool useIdParams) {
llvm::SmallVector<uint32_t, 2> execModesParams;
uint32_t exeMode = 0;
const auto args = expr->getArgs();
for (uint32_t i = 0; i < expr->getNumArgs(); ++i) {
uint32_t argInteger;
Expr::EvalResult evalResult;
if (args[i]->EvaluateAsRValue(evalResult, astContext) &&
!evalResult.HasSideEffects && evalResult.Val.isInt()) {
argInteger = evalResult.Val.getInt().getZExtValue();
} else {
emitError("argument should be constant integer", expr->getExprLoc());
return nullptr;
}
if (i > 0)
execModesParams.push_back(argInteger);
else
exeMode = argInteger;
}
assert(entryFunction != nullptr);
assert(exeMode != 0);
return spvBuilder.addExecutionMode(
entryFunction, static_cast<spv::ExecutionMode>(exeMode), execModesParams,
expr->getExprLoc(), useIdParams);
}
SpirvInstruction *
SpirvEmitter::processSpvIntrinsicTypeDef(const CallExpr *expr) {
auto funcDecl = expr->getDirectCallee();
SmallVector<SpvIntrinsicTypeOperand, 3> operands;
const auto args = expr->getArgs();
for (uint32_t i = 0; i < expr->getNumArgs(); ++i) {
auto param = funcDecl->getParamDecl(i);
const Expr *arg = args[i]->IgnoreParenLValueCasts();
if (param->hasAttr<VKReferenceExtAttr>()) {
auto *recType = param->getType()->getAs<RecordType>();
if (recType && recType->getDecl()->getName() == "ext_type") {
auto typeId = hlsl::GetHLSLResourceTemplateUInt(arg->getType());
auto *typeArg = spvContext.getCreatedSpirvIntrinsicType(typeId);
operands.emplace_back(typeArg);
} else {
operands.emplace_back(doExpr(arg));
}
} else if (param->hasAttr<VKLiteralExtAttr>()) {
SpirvInstruction *argInst = doExpr(arg);
auto constArg = dyn_cast<SpirvConstant>(argInst);
assert(constArg != nullptr);
constArg->setLiteral();
operands.emplace_back(constArg);
} else {
operands.emplace_back(loadIfGLValue(arg));
}
}
auto typeDefAttr = funcDecl->getAttr<VKTypeDefExtAttr>();
spvContext.getOrCreateSpirvIntrinsicType(typeDefAttr->getId(),
typeDefAttr->getOpcode(), operands);
return createSpirvIntrInstExt(
funcDecl->getAttrs(), QualType(),
/*spvArgs*/ llvm::SmallVector<SpirvInstruction *, 1>{},
/*isInstr*/ false, expr->getExprLoc());
}
bool SpirvEmitter::spirvToolsValidate(std::vector<uint32_t> *mod,
std::string *messages) {
spvtools::SpirvTools tools(featureManager.getTargetEnv());
tools.SetMessageConsumer(
[messages](spv_message_level_t /*level*/, const char * /*source*/,
const spv_position_t & /*position*/,
const char *message) { *messages += message; });
spvtools::ValidatorOptions options;
options.SetBeforeHlslLegalization(beforeHlslLegalization);
// GL: strict block layout rules
// VK: relaxed block layout rules
// DX: Skip block layout rules
if (spirvOptions.useScalarLayout || spirvOptions.useDxLayout) {
options.SetScalarBlockLayout(true);
} else if (spirvOptions.useGlLayout) {
// spirv-val by default checks this.
} else {
options.SetRelaxBlockLayout(true);
}
options.SetUniversalLimit(spv_validator_limit_max_id_bound,
spirvOptions.maxId);
return tools.Validate(mod->data(), mod->size(), options);
}
void SpirvEmitter::addDerivativeGroupExecutionMode() {
assert(spvContext.isCS());
SpirvExecutionMode *numThreadsEm = spvBuilder.getModule()->findExecutionMode(
entryFunction, spv::ExecutionMode::LocalSize);
auto numThreads = numThreadsEm->getParams();
  // The layout of the quad is determined by the number of threads in each
  // dimension. From the HLSL spec
// (https://microsoft.github.io/DirectX-Specs/d3d/HLSL_SM_6_6_Derivatives.html):
//
// Where numthreads has an X value divisible by 4 and Y and Z are both 1, the
// quad layouts are determined according to 1D quad rules. Where numthreads X
// and Y values are divisible by 2, the quad layouts are determined according
// to 2D quad rules. Using derivative operations in any numthreads
// configuration not matching either of these is invalid and will produce an
// error.
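  // For example, numthreads(8, 1, 1) matches the 1D rule and selects
  // DerivativeGroupLinearNV, while numthreads(8, 8, 1) matches the 2D rule
  // and keeps DerivativeGroupQuadsNV.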
spv::ExecutionMode em = spv::ExecutionMode::DerivativeGroupQuadsNV;
if (numThreads[0] % 4 == 0 && numThreads[1] == 1 && numThreads[2] == 1) {
em = spv::ExecutionMode::DerivativeGroupLinearNV;
} else {
assert(numThreads[0] % 2 == 0 && numThreads[1] % 2 == 0);
}
spvBuilder.addExecutionMode(entryFunction, em, {}, SourceLocation());
}
SpirvVariable *SpirvEmitter::createPCFParmVarAndInitFromStageInputVar(
const ParmVarDecl *param) {
const QualType type = param->getType();
std::string tempVarName = "param.var." + param->getNameAsString();
auto paramLoc = param->getLocation();
auto *tempVar = spvBuilder.addFnVar(
type, paramLoc, tempVarName, param->hasAttr<HLSLPreciseAttr>(),
param->hasAttr<HLSLNoInterpolationAttr>());
SpirvInstruction *loadedValue = nullptr;
declIdMapper.createStageInputVar(param, &loadedValue, /*forPCF*/ true);
spvBuilder.createStore(tempVar, loadedValue, paramLoc);
return tempVar;
}
SpirvVariable *
SpirvEmitter::createFunctionScopeTempFromParameter(const ParmVarDecl *param) {
const QualType type = param->getType();
std::string tempVarName = "param.var." + param->getNameAsString();
auto paramLoc = param->getLocation();
auto *tempVar = spvBuilder.addFnVar(
type, paramLoc, tempVarName, param->hasAttr<HLSLPreciseAttr>(),
param->hasAttr<HLSLNoInterpolationAttr>());
return tempVar;
}
bool SpirvEmitter::spirvToolsRunPass(std::vector<uint32_t> *mod,
spvtools::Optimizer::PassToken token,
std::string *messages) {
spvtools::Optimizer optimizer(featureManager.getTargetEnv());
optimizer.SetMessageConsumer(
[messages](spv_message_level_t /*level*/, const char * /*source*/,
const spv_position_t & /*position*/,
const char *message) { *messages += message; });
string::RawOstreamBuf printAllBuf(llvm::errs());
std::ostream printAllOS(&printAllBuf);
if (spirvOptions.printAll)
optimizer.SetPrintAll(&printAllOS);
spvtools::OptimizerOptions options;
options.set_run_validator(false);
options.set_preserve_bindings(spirvOptions.preserveBindings);
options.set_max_id_bound(spirvOptions.maxId);
optimizer.RegisterPass(std::move(token));
return optimizer.Run(mod->data(), mod->size(), mod, options);
}
bool SpirvEmitter::spirvToolsFixupOpExtInst(std::vector<uint32_t> *mod,
std::string *messages) {
spvtools::Optimizer::PassToken token =
spvtools::CreateOpExtInstWithForwardReferenceFixupPass();
return spirvToolsRunPass(mod, std::move(token), messages);
}
bool SpirvEmitter::spirvToolsTrimCapabilities(std::vector<uint32_t> *mod,
std::string *messages) {
spvtools::Optimizer::PassToken token = spvtools::CreateTrimCapabilitiesPass();
return spirvToolsRunPass(mod, std::move(token), messages);
}
bool SpirvEmitter::spirvToolsUpgradeToVulkanMemoryModel(
std::vector<uint32_t> *mod, std::string *messages) {
spvtools::Optimizer::PassToken token =
spvtools::CreateUpgradeMemoryModelPass();
return spirvToolsRunPass(mod, std::move(token), messages);
}
bool SpirvEmitter::spirvToolsOptimize(std::vector<uint32_t> *mod,
std::string *messages) {
spvtools::Optimizer optimizer(featureManager.getTargetEnv());
optimizer.SetMessageConsumer(
[messages](spv_message_level_t /*level*/, const char * /*source*/,
const spv_position_t & /*position*/,
const char *message) { *messages += message; });
string::RawOstreamBuf printAllBuf(llvm::errs());
std::ostream printAllOS(&printAllBuf);
if (spirvOptions.printAll)
optimizer.SetPrintAll(&printAllOS);
spvtools::OptimizerOptions options;
options.set_run_validator(false);
options.set_preserve_bindings(spirvOptions.preserveBindings);
options.set_max_id_bound(spirvOptions.maxId);
if (spirvOptions.optConfig.empty()) {
// Add performance passes.
optimizer.RegisterPerformancePasses(spirvOptions.preserveInterface);
// Add propagation of volatile semantics passes.
optimizer.RegisterPass(spvtools::CreateSpreadVolatileSemanticsPass());
// Add compact ID pass.
optimizer.RegisterPass(spvtools::CreateCompactIdsPass());
} else {
// Command line options use llvm::SmallVector and llvm::StringRef, whereas
// SPIR-V optimizer uses std::vector and std::string.
std::vector<std::string> stdFlags;
for (const auto &f : spirvOptions.optConfig)
stdFlags.push_back(f.str());
if (!optimizer.RegisterPassesFromFlags(stdFlags))
return false;
}
return optimizer.Run(mod->data(), mod->size(), mod, options);
}
bool SpirvEmitter::spirvToolsLegalize(std::vector<uint32_t> *mod,
std::string *messages,
const std::vector<DescriptorSetAndBinding>
*dsetbindingsToCombineImageSampler) {
spvtools::Optimizer optimizer(featureManager.getTargetEnv());
optimizer.SetMessageConsumer(
[messages](spv_message_level_t /*level*/, const char * /*source*/,
const spv_position_t & /*position*/,
const char *message) { *messages += message; });
string::RawOstreamBuf printAllBuf(llvm::errs());
std::ostream printAllOS(&printAllBuf);
if (spirvOptions.printAll)
optimizer.SetPrintAll(&printAllOS);
spvtools::OptimizerOptions options;
options.set_run_validator(false);
options.set_preserve_bindings(spirvOptions.preserveBindings);
options.set_max_id_bound(spirvOptions.maxId);
// Add interface variable SROA if the signature packing is enabled.
if (spirvOptions.signaturePacking) {
optimizer.RegisterPass(
spvtools::CreateInterfaceVariableScalarReplacementPass());
}
optimizer.RegisterLegalizationPasses(spirvOptions.preserveInterface);
// Add flattening of resources if needed.
if (spirvOptions.flattenResourceArrays) {
optimizer.RegisterPass(
spvtools::CreateReplaceDescArrayAccessUsingVarIndexPass());
optimizer.RegisterPass(
spvtools::CreateAggressiveDCEPass(spirvOptions.preserveInterface));
optimizer.RegisterPass(
spvtools::CreateDescriptorArrayScalarReplacementPass());
optimizer.RegisterPass(
spvtools::CreateAggressiveDCEPass(spirvOptions.preserveInterface));
}
if (declIdMapper.requiresFlatteningCompositeResources()) {
optimizer.RegisterPass(
spvtools::CreateDescriptorCompositeScalarReplacementPass());
// ADCE should be run after desc_sroa in order to remove potentially
// illegal types such as structures containing opaque types.
optimizer.RegisterPass(
spvtools::CreateAggressiveDCEPass(spirvOptions.preserveInterface));
}
if (dsetbindingsToCombineImageSampler &&
!dsetbindingsToCombineImageSampler->empty()) {
optimizer.RegisterPass(spvtools::CreateConvertToSampledImagePass(
*dsetbindingsToCombineImageSampler));
// ADCE should be run after combining images and samplers in order to
// remove potentially illegal types such as structures containing opaque
// types.
optimizer.RegisterPass(
spvtools::CreateAggressiveDCEPass(spirvOptions.preserveInterface));
}
if (spirvOptions.reduceLoadSize) {
// The threshold must be bigger than 1.0 to reduce all possible loads.
optimizer.RegisterPass(spvtools::CreateReduceLoadSizePass(1.1));
// ADCE should be run after reduce-load-size pass in order to remove
// dead instructions.
optimizer.RegisterPass(
spvtools::CreateAggressiveDCEPass(spirvOptions.preserveInterface));
}
optimizer.RegisterPass(spvtools::CreateReplaceInvalidOpcodePass());
optimizer.RegisterPass(spvtools::CreateCompactIdsPass());
optimizer.RegisterPass(spvtools::CreateSpreadVolatileSemanticsPass());
if (spirvOptions.fixFuncCallArguments) {
optimizer.RegisterPass(spvtools::CreateFixFuncCallArgumentsPass());
}
return optimizer.Run(mod->data(), mod->size(), mod, options);
}
SpirvInstruction *
SpirvEmitter::doUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *expr) {
// TODO: We support only `sizeof()`. Support other kinds.
if (expr->getKind() != clang::UnaryExprOrTypeTrait::UETT_SizeOf) {
emitError("expression class '%0' unimplemented", expr->getExprLoc())
<< expr->getStmtClassName();
return nullptr;
}
if (auto *constExpr =
constEvaluator.tryToEvaluateAsConst(expr, isSpecConstantMode)) {
constExpr->setRValue();
return constExpr;
}
AlignmentSizeCalculator alignmentCalc(astContext, spirvOptions);
uint32_t size = 0, stride = 0;
std::tie(std::ignore, size) = alignmentCalc.getAlignmentAndSize(
expr->getArgumentType(), SpirvLayoutRule::Scalar,
/*isRowMajor*/ llvm::None, &stride);
auto *sizeConst = spvBuilder.getConstantInt(astContext.UnsignedIntTy,
llvm::APInt(32, size));
sizeConst->setRValue();
return sizeConst;
}
std::vector<SpirvInstruction *>
SpirvEmitter::decomposeToScalars(SpirvInstruction *inst) {
QualType elementType;
uint32_t elementCount = 0;
uint32_t numOfRows = 0;
uint32_t numOfCols = 0;
QualType resultType = inst->getAstResultType();
if (hlsl::IsHLSLResourceType(resultType)) {
resultType = hlsl::GetHLSLResourceResultType(resultType);
}
if (isScalarType(resultType)) {
return {inst};
}
if (isVectorType(resultType, &elementType, &elementCount)) {
std::vector<SpirvInstruction *> result;
for (uint32_t i = 0; i < elementCount; i++) {
auto *element = spvBuilder.createCompositeExtract(
elementType, inst, {i}, inst->getSourceLocation());
element->setLayoutRule(inst->getLayoutRule());
result.push_back(element);
}
return result;
}
if (isMxNMatrix(resultType, &elementType, &numOfRows, &numOfCols)) {
std::vector<SpirvInstruction *> result;
for (uint32_t i = 0; i < numOfRows; i++) {
for (uint32_t j = 0; j < numOfCols; j++) {
auto *element = spvBuilder.createCompositeExtract(
elementType, inst, {i, j}, inst->getSourceLocation());
element->setLayoutRule(inst->getLayoutRule());
result.push_back(element);
}
}
return result;
}
if (isArrayType(resultType, &elementType, &elementCount)) {
std::vector<SpirvInstruction *> result;
for (uint32_t i = 0; i < elementCount; i++) {
auto *element = spvBuilder.createCompositeExtract(
elementType, inst, {i}, inst->getSourceLocation());
element->setLayoutRule(inst->getLayoutRule());
auto decomposedElement = decomposeToScalars(element);
      // TODO: See whether this copy can be avoided to improve performance.
result.insert(result.end(), decomposedElement.begin(),
decomposedElement.end());
}
return result;
}
if (const RecordType *recordType = resultType->getAs<RecordType>()) {
std::vector<SpirvInstruction *> result;
const SpirvType *type = nullptr;
LowerTypeVisitor lowerTypeVisitor(astContext, spvContext, spirvOptions,
spvBuilder);
type = lowerTypeVisitor.lowerType(resultType, inst->getLayoutRule(), false,
inst->getSourceLocation());
forEachSpirvField(
recordType, dyn_cast<StructType>(type),
[this, inst, &result](size_t spirvFieldIndex, const QualType &fieldType,
const StructType::FieldInfo &fieldInfo) {
auto *field = spvBuilder.createCompositeExtract(
fieldType, inst, {fieldInfo.fieldIndex},
inst->getSourceLocation());
field->setLayoutRule(inst->getLayoutRule());
auto decomposedField = decomposeToScalars(field);
          // TODO: See whether this copy can be avoided to improve performance.
result.insert(result.end(), decomposedField.begin(),
decomposedField.end());
return true;
},
true);
return result;
}
llvm_unreachable("Trying to decompose a type that we cannot decompose");
return {};
}
SpirvInstruction *
SpirvEmitter::generateFromScalars(QualType type,
std::vector<SpirvInstruction *> &scalars,
SpirvLayoutRule layoutRule) {
QualType elementType;
uint32_t elementCount = 0;
uint32_t numOfRows = 0;
uint32_t numOfCols = 0;
assert(!scalars.empty());
auto sourceLocation = scalars[0]->getSourceLocation();
if (isScalarType(type)) {
// If the type is bool with a non-void layout rule, then it should be
// treated as a uint.
assert(layoutRule == SpirvLayoutRule::Void &&
"If the layout type is not void, then we should cast to an int when "
"type is a boolean.");
QualType sourceType = scalars[0]->getAstResultType();
if (sourceType->isBooleanType() &&
scalars[0]->getLayoutRule() != SpirvLayoutRule::Void) {
sourceType = astContext.UnsignedIntTy;
}
SpirvInstruction *result =
castToType(scalars[0], sourceType, type, sourceLocation);
scalars.erase(scalars.begin());
return result;
} else if (isVectorType(type, &elementType, &elementCount)) {
assert(elementCount <= scalars.size());
std::vector<SpirvInstruction *> elements;
for (uint32_t i = 0; i < elementCount; ++i) {
elements.push_back(castToType(scalars[i], scalars[i]->getAstResultType(),
elementType,
scalars[i]->getSourceLocation()));
}
SpirvInstruction *result =
spvBuilder.createCompositeConstruct(type, elements, sourceLocation);
result->setLayoutRule(layoutRule);
scalars.erase(scalars.begin(), scalars.begin() + elementCount);
return result;
} else if (isMxNMatrix(type, &elementType, &numOfRows, &numOfCols)) {
std::vector<SpirvInstruction *> rows;
QualType rowType = astContext.getExtVectorType(elementType, numOfCols);
for (uint32_t i = 0; i < numOfRows; i++) {
std::vector<SpirvInstruction *> row;
for (uint32_t j = 0; j < numOfCols; j++) {
row.push_back(castToType(scalars[j], scalars[j]->getAstResultType(),
elementType, scalars[j]->getSourceLocation()));
}
scalars.erase(scalars.begin(), scalars.begin() + numOfCols);
SpirvInstruction *r =
spvBuilder.createCompositeConstruct(rowType, row, sourceLocation);
r->setLayoutRule(layoutRule);
rows.push_back(r);
}
SpirvInstruction *result =
spvBuilder.createCompositeConstruct(type, rows, sourceLocation);
result->setLayoutRule(layoutRule);
return result;
} else if (isArrayType(type, &elementType, &elementCount)) {
std::vector<SpirvInstruction *> elements;
for (uint32_t i = 0; i < elementCount; i++) {
elements.push_back(generateFromScalars(elementType, scalars, layoutRule));
}
SpirvInstruction *result =
spvBuilder.createCompositeConstruct(type, elements, sourceLocation);
result->setLayoutRule(layoutRule);
return result;
  } else if (const RecordType *recordType = type->getAs<RecordType>()) {
std::vector<SpirvInstruction *> elements;
LowerTypeVisitor lowerTypeVisitor(astContext, spvContext, spirvOptions,
spvBuilder);
const SpirvType *spirvType =
lowerTypeVisitor.lowerType(type, layoutRule, false, sourceLocation);
forEachSpirvField(recordType, dyn_cast<StructType>(spirvType),
[this, &elements, &scalars, layoutRule](
size_t spirvFieldIndex, const QualType &fieldType,
const StructType::FieldInfo &fieldInfo) {
elements.push_back(generateFromScalars(
fieldType, scalars, layoutRule));
return true;
});
SpirvInstruction *result =
spvBuilder.createCompositeConstruct(type, elements, sourceLocation);
result->setLayoutRule(layoutRule);
return result;
} else {
llvm_unreachable("Trying to generate a type that we cannot generate");
}
return {};
}
SpirvInstruction *
SpirvEmitter::splatScalarToGenerate(QualType type, SpirvInstruction *scalar,
SpirvLayoutRule layoutRule) {
QualType elementType;
uint32_t elementCount = 0;
uint32_t numOfRows = 0;
uint32_t numOfCols = 0;
SourceLocation sourceLocation = scalar->getSourceLocation();
if (isScalarType(type)) {
// If the type is bool with a non-void layout rule, then it should be
// treated as a uint.
assert(layoutRule == SpirvLayoutRule::Void &&
"If the layout type is not void, then we should cast to an int when "
"type is a boolean.");
QualType sourceType = scalar->getAstResultType();
if (sourceType->isBooleanType() &&
scalar->getLayoutRule() != SpirvLayoutRule::Void) {
sourceType = astContext.UnsignedIntTy;
}
SpirvInstruction *result =
castToType(scalar, sourceType, type, scalar->getSourceLocation());
return result;
} else if (isVectorType(type, &elementType, &elementCount)) {
SpirvInstruction *element =
castToType(scalar, scalar->getAstResultType(), elementType,
scalar->getSourceLocation());
std::vector<SpirvInstruction *> elements(elementCount, element);
SpirvInstruction *result = spvBuilder.createCompositeConstruct(
type, elements, scalar->getSourceLocation());
result->setLayoutRule(layoutRule);
return result;
} else if (isMxNMatrix(type, &elementType, &numOfRows, &numOfCols)) {
SpirvInstruction *element =
castToType(scalar, scalar->getAstResultType(), elementType,
scalar->getSourceLocation());
assert(element);
std::vector<SpirvInstruction *> row(numOfCols, element);
QualType rowType = astContext.getExtVectorType(elementType, numOfCols);
SpirvInstruction *r =
spvBuilder.createCompositeConstruct(rowType, row, sourceLocation);
r->setLayoutRule(layoutRule);
std::vector<SpirvInstruction *> rows(numOfRows, r);
SpirvInstruction *result =
spvBuilder.createCompositeConstruct(type, rows, sourceLocation);
result->setLayoutRule(layoutRule);
return result;
} else if (isArrayType(type, &elementType, &elementCount)) {
SpirvInstruction *element =
splatScalarToGenerate(elementType, scalar, layoutRule);
std::vector<SpirvInstruction *> elements(elementCount, element);
SpirvInstruction *result = spvBuilder.createCompositeConstruct(
type, elements, scalar->getSourceLocation());
result->setLayoutRule(layoutRule);
return result;
  } else if (const RecordType *recordType = type->getAs<RecordType>()) {
std::vector<SpirvInstruction *> elements;
LowerTypeVisitor lowerTypeVisitor(astContext, spvContext, spirvOptions,
spvBuilder);
const SpirvType *spirvType = lowerTypeVisitor.lowerType(
type, SpirvLayoutRule::Void, false, sourceLocation);
forEachSpirvField(recordType, dyn_cast<StructType>(spirvType),
[this, &elements, &scalar, layoutRule](
size_t spirvFieldIndex, const QualType &fieldType,
const StructType::FieldInfo &fieldInfo) {
elements.push_back(splatScalarToGenerate(
fieldType, scalar, layoutRule));
return true;
});
SpirvInstruction *result =
spvBuilder.createCompositeConstruct(type, elements, sourceLocation);
result->setLayoutRule(layoutRule);
return result;
} else {
llvm_unreachable("Trying to generate a type that we cannot generate");
}
return {};
}
bool SpirvEmitter::UpgradeToVulkanMemoryModelIfNeeded(
std::vector<uint32_t> *module) {
// DXC generates code assuming the vulkan memory model is not used. However,
// if a feature is used that requires the Vulkan memory model, then some code
// may need to be rewritten.
if (!spirvOptions.useVulkanMemoryModel &&
!spvBuilder.hasCapability(spv::Capability::VulkanMemoryModel))
return true;
std::string messages;
if (!spirvToolsUpgradeToVulkanMemoryModel(module, &messages)) {
emitFatalError("failed to use the vulkan memory model: %0", {}) << messages;
emitNote("please file a bug report on "
"https://github.com/Microsoft/DirectXShaderCompiler/issues "
"with source code if possible",
{});
return false;
}
return true;
}
} // end namespace spirv
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/BlockReadableOrder.h | //===--- BlockReadableOrder.h - Visit blocks in human readable order ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The SPIR-V spec requires code blocks to appear in an order satisfying the
// dominator-tree direction (i.e., dominator before the dominated). This is,
// actually, easy to achieve: any pre-order CFG traversal algorithm will do it.
// Because such algorithms visit a block only after traversing some path to it
// from the root, they necessarily visit the block's immediate dominator first.
//
// But not every graph-traversal algorithm outputs blocks in an order that
// appears logical to human readers. The problem is that unrelated branches may
// be interspersed with each other, and merge blocks may come before some of the
// branches being merged.
//
// A good, human-readable order of blocks may be achieved by performing
// depth-first search but delaying continue and merge nodes until after all
// their branches have been visited. This is implemented below by the
// BlockReadableOrderVisitor.
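//
// For example, for an if-then-else whose branches meet at %merge, a naive
// pre-order traversal may emit %entry, %then, %merge, %else; delaying the
// merge node instead yields the readable order %entry, %then, %else, %merge.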
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_SPIRV_BLOCKREADABLEORDER_H
#define LLVM_CLANG_LIB_SPIRV_BLOCKREADABLEORDER_H
#include "clang/SPIRV/SpirvBasicBlock.h"
#include "llvm/ADT/DenseSet.h"
namespace clang {
namespace spirv {
/// \brief A basic block visitor traversing basic blocks in a human readable
/// order and calling a pre-set callback on each basic block.
class BlockReadableOrderVisitor {
public:
explicit BlockReadableOrderVisitor(std::function<void(SpirvBasicBlock *)> cb)
: callback(cb) {}
/// \brief Recursively visits all blocks reachable from the given starting
/// basic block in a depth-first manner and calls the callback passed-in
/// during construction on each basic block.
void visit(SpirvBasicBlock *block);
private:
std::function<void(SpirvBasicBlock *)> callback;
llvm::DenseSet<SpirvBasicBlock *> doneBlocks; ///< Blocks already visited
llvm::DenseSet<SpirvBasicBlock *> todoBlocks; ///< Blocks to be visited later
};
} // end namespace spirv
} // end namespace clang
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/String.cpp | //===-- String.cpp - SPIR-V Strings -----------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/SPIRV/String.h"
#include <assert.h>
namespace clang {
namespace spirv {
namespace string {
/// \brief Reinterprets a given string as sequence of words.
std::vector<uint32_t> encodeSPIRVString(llvm::StringRef strChars) {
// Initialize all words to 0.
size_t numChars = strChars.size();
std::vector<uint32_t> result(numChars / 4 + 1, 0);
// From the SPIR-V spec, literal string is
//
// A nul-terminated stream of characters consuming an integral number of
// words. The character set is Unicode in the UTF-8 encoding scheme. The UTF-8
// octets (8-bit bytes) are packed four per word, following the little-endian
// convention (i.e., the first octet is in the lowest-order 8 bits of the
// word). The final word contains the string's nul-termination character (0),
// and all contents past the end of the string in the final word are padded
// with 0.
//
// So the following works on little endian machines.
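  // For example, "abc" encodes to the single word 0x00636261: 'a' (0x61) in
  // the lowest-order byte, then 'b' and 'c', with the nul terminator in the
  // highest-order byte.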
char *strDest = reinterpret_cast<char *>(result.data());
strncpy(strDest, strChars.data(), numChars);
return result;
}
/// \brief Reinterprets the given vector of 32-bit words as a string.
/// Expects that the words represent a null-terminated string.
/// Assumes a little-endian architecture.
std::string decodeSPIRVString(llvm::ArrayRef<uint32_t> strWords) {
if (!strWords.empty()) {
return reinterpret_cast<const char *>(strWords.data());
}
return "";
}
} // end namespace string
} // end namespace spirv
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/SpirvContext.cpp | //===--- SpirvContext.cpp - SPIR-V SpirvContext implementation-------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include <algorithm>
#include <tuple>
#include "clang/SPIRV/SpirvContext.h"
#include "clang/SPIRV/SpirvModule.h"
namespace clang {
namespace spirv {
SpirvContext::SpirvContext()
: allocator(), voidType(nullptr), boolType(nullptr), sintTypes({}),
uintTypes({}), floatTypes({}), samplerType(nullptr),
curShaderModelKind(ShaderModelKind::Invalid), majorVersion(0),
minorVersion(0), currentLexicalScope(nullptr) {
voidType = new (this) VoidType;
boolType = new (this) BoolType;
samplerType = new (this) SamplerType;
accelerationStructureTypeNV = new (this) AccelerationStructureTypeNV;
rayQueryTypeKHR = new (this) RayQueryTypeKHR;
}
SpirvContext::~SpirvContext() {
voidType->~VoidType();
boolType->~BoolType();
samplerType->~SamplerType();
accelerationStructureTypeNV->~AccelerationStructureTypeNV();
rayQueryTypeKHR->~RayQueryTypeKHR();
for (auto *sintType : sintTypes)
if (sintType) // sintTypes may contain nullptr
sintType->~IntegerType();
for (auto *uintType : uintTypes)
if (uintType) // uintTypes may contain nullptr
uintType->~IntegerType();
for (auto *floatType : floatTypes)
if (floatType) // floatTypes may contain nullptr
floatType->~FloatType();
for (auto &pair : vecTypes)
for (auto *vecType : pair.second)
if (vecType) // vecTypes may contain nullptr
vecType->~VectorType();
for (auto &pair : matTypes)
for (auto *matType : pair.second)
matType->~MatrixType();
for (auto *arrType : arrayTypes)
arrType->~ArrayType();
for (auto *raType : runtimeArrayTypes)
raType->~RuntimeArrayType();
for (auto *fnType : functionTypes)
fnType->~FunctionType();
for (auto *structType : structTypes)
structType->~StructType();
for (auto *hybridStructType : hybridStructTypes)
hybridStructType->~HybridStructType();
for (auto pair : sampledImageTypes)
pair.second->~SampledImageType();
for (auto *hybridSampledImageType : hybridSampledImageTypes)
hybridSampledImageType->~HybridSampledImageType();
for (auto *imgType : imageTypes)
imgType->~ImageType();
for (auto &pair : pointerTypes)
for (auto &scPtrTypePair : pair.second)
scPtrTypePair.second->~SpirvPointerType();
for (auto *hybridPtrType : hybridPointerTypes)
hybridPtrType->~HybridPointerType();
for (auto &typePair : debugTypes)
typePair.second->releaseMemory();
for (auto &typePair : typeTemplates)
typePair.second->releaseMemory();
for (auto &typePair : typeTemplateParams)
typePair.second->releaseMemory();
for (auto &pair : spirvIntrinsicTypesById) {
assert(pair.second);
pair.second->~SpirvIntrinsicType();
}
for (auto *spirvIntrinsicType : spirvIntrinsicTypes) {
spirvIntrinsicType->~SpirvIntrinsicType();
}
}
inline uint32_t log2ForBitwidth(uint32_t bitwidth) {
assert(bitwidth >= 8 && bitwidth <= 64 && llvm::isPowerOf2_32(bitwidth));
return llvm::Log2_32(bitwidth);
}
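// Illustrative mapping: log2ForBitwidth turns each supported bitwidth into a
// small array index, which is how sintTypes/uintTypes/floatTypes are indexed
// below:
//
//   log2ForBitwidth(8)  == 3
//   log2ForBitwidth(16) == 4
//   log2ForBitwidth(32) == 5
//   log2ForBitwidth(64) == 6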
const IntegerType *SpirvContext::getSIntType(uint32_t bitwidth) {
auto &type = sintTypes[log2ForBitwidth(bitwidth)];
if (type == nullptr) {
type = new (this) IntegerType(bitwidth, true);
}
return type;
}
const IntegerType *SpirvContext::getUIntType(uint32_t bitwidth) {
auto &type = uintTypes[log2ForBitwidth(bitwidth)];
if (type == nullptr) {
type = new (this) IntegerType(bitwidth, false);
}
return type;
}
const FloatType *SpirvContext::getFloatType(uint32_t bitwidth) {
auto &type = floatTypes[log2ForBitwidth(bitwidth)];
if (type == nullptr) {
type = new (this) FloatType(bitwidth);
}
return type;
}
const VectorType *SpirvContext::getVectorType(const SpirvType *elemType,
uint32_t count) {
// We are certain this should be a scalar type. Otherwise, cast causes an
// assertion failure.
const ScalarType *scalarType = cast<ScalarType>(elemType);
assert(count == 2 || count == 3 || count == 4);
auto found = vecTypes.find(scalarType);
if (found != vecTypes.end()) {
auto &type = found->second[count];
if (type != nullptr)
return type;
} else {
// Make sure to initialize since std::array is "an aggregate type with the
// same semantics as a struct holding a C-style array T[N]".
vecTypes[scalarType] = {};
}
return vecTypes[scalarType][count] = new (this) VectorType(scalarType, count);
}
const SpirvType *SpirvContext::getMatrixType(const SpirvType *elemType,
uint32_t count) {
// We are certain this should be a vector type. Otherwise, cast causes an
// assertion failure.
const VectorType *vecType = cast<VectorType>(elemType);
assert(count == 2 || count == 3 || count == 4);
// In the case of non-floating-point matrices, we represent them as array of
// vectors.
if (!isa<FloatType>(vecType->getElementType())) {
return getArrayType(elemType, count, llvm::None);
}
auto foundVec = matTypes.find(vecType);
if (foundVec != matTypes.end()) {
const auto &matVector = foundVec->second;
// Create a temporary object for finding in the vector.
MatrixType type(vecType, count);
for (const auto *cachedType : matVector)
if (type == *cachedType)
return cachedType;
}
const auto *ptr = new (this) MatrixType(vecType, count);
matTypes[vecType].push_back(ptr);
return ptr;
}
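// For example (a sketch): SPIR-V's OpTypeMatrix requires a floating-point
// column type, so requesting a matrix of two int3 vectors yields an ArrayType
// of two int3 VectorTypes, whereas two float3 vectors yield a real MatrixType.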
const ImageType *
SpirvContext::getImageType(const ImageType *imageTypeWithUnknownFormat,
spv::ImageFormat format) {
return getImageType(imageTypeWithUnknownFormat->getSampledType(),
imageTypeWithUnknownFormat->getDimension(),
imageTypeWithUnknownFormat->getDepth(),
imageTypeWithUnknownFormat->isArrayedImage(),
imageTypeWithUnknownFormat->isMSImage(),
imageTypeWithUnknownFormat->withSampler(), format);
}
const ImageType *SpirvContext::getImageType(const SpirvType *sampledType,
spv::Dim dim,
ImageType::WithDepth depth,
bool arrayed, bool ms,
ImageType::WithSampler sampled,
spv::ImageFormat format) {
// We are certain this should be a numerical type. Otherwise, cast causes an
// assertion failure.
const NumericalType *elemType = cast<NumericalType>(sampledType);
// Create a temporary object for finding in the set.
ImageType type(elemType, dim, depth, arrayed, ms, sampled, format);
auto found = imageTypes.find(&type);
if (found != imageTypes.end())
return *found;
auto inserted = imageTypes.insert(
new (this) ImageType(elemType, dim, depth, arrayed, ms, sampled, format));
return *(inserted.first);
}
const SampledImageType *
SpirvContext::getSampledImageType(const ImageType *image) {
auto found = sampledImageTypes.find(image);
if (found != sampledImageTypes.end())
return found->second;
return sampledImageTypes[image] = new (this) SampledImageType(image);
}
const HybridSampledImageType *
SpirvContext::getSampledImageType(QualType image) {
const HybridSampledImageType *result =
new (this) HybridSampledImageType(image);
hybridSampledImageTypes.push_back(result);
return result;
}
const ArrayType *
SpirvContext::getArrayType(const SpirvType *elemType, uint32_t elemCount,
llvm::Optional<uint32_t> arrayStride) {
ArrayType type(elemType, elemCount, arrayStride);
auto found = arrayTypes.find(&type);
if (found != arrayTypes.end())
return *found;
auto inserted =
arrayTypes.insert(new (this) ArrayType(elemType, elemCount, arrayStride));
// The return value is an (iterator, bool) pair. The boolean indicates whether
// it was actually added as a new type.
return *(inserted.first);
}
const RuntimeArrayType *
SpirvContext::getRuntimeArrayType(const SpirvType *elemType,
llvm::Optional<uint32_t> arrayStride) {
RuntimeArrayType type(elemType, arrayStride);
auto found = runtimeArrayTypes.find(&type);
if (found != runtimeArrayTypes.end())
return *found;
auto inserted = runtimeArrayTypes.insert(
new (this) RuntimeArrayType(elemType, arrayStride));
return *(inserted.first);
}
const StructType *
SpirvContext::getStructType(llvm::ArrayRef<StructType::FieldInfo> fields,
llvm::StringRef name, bool isReadOnly,
StructInterfaceType interfaceType) {
// We are creating a temporary struct type here for querying whether the
// same type was already created. It is a little bit costly, but we can
// avoid allocating directly from the bump pointer allocator, from which
// then we are unable to reclaim until the allocator itself is destroyed.
StructType type(fields, name, isReadOnly, interfaceType);
auto found = std::find_if(
structTypes.begin(), structTypes.end(),
[&type](const StructType *cachedType) { return type == *cachedType; });
if (found != structTypes.end())
return *found;
structTypes.push_back(
new (this) StructType(fields, name, isReadOnly, interfaceType));
return structTypes.back();
}
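// Usage sketch (ctx and fields are assumed): struct types are uniqued by
// content, so repeated requests with identical fields, name, and layout
// return the same cached pointer:
//
//   const auto *a = ctx.getStructType(fields, "type.Foo", /*isReadOnly*/ false,
//                                     StructInterfaceType::InternalStorage);
//   const auto *b = ctx.getStructType(fields, "type.Foo", /*isReadOnly*/ false,
//                                     StructInterfaceType::InternalStorage);
//   assert(a == b);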
const HybridStructType *SpirvContext::getHybridStructType(
llvm::ArrayRef<HybridStructType::FieldInfo> fields, llvm::StringRef name,
bool isReadOnly, StructInterfaceType interfaceType) {
const HybridStructType *result =
new (this) HybridStructType(fields, name, isReadOnly, interfaceType);
hybridStructTypes.push_back(result);
return result;
}
const SpirvPointerType *SpirvContext::getPointerType(const SpirvType *pointee,
spv::StorageClass sc) {
auto foundPointee = pointerTypes.find(pointee);
if (foundPointee != pointerTypes.end()) {
auto &pointeeMap = foundPointee->second;
auto foundSC = pointeeMap.find(sc);
if (foundSC != pointeeMap.end())
return foundSC->second;
}
return pointerTypes[pointee][sc] = new (this) SpirvPointerType(pointee, sc);
}
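// Usage sketch (ctx is an assumed SpirvContext): pointer types are uniqued
// per (pointee, storage class), so they can be compared by address:
//
//   const auto *a = ctx.getPointerType(ctx.getUIntType(32),
//                                      spv::StorageClass::Function);
//   const auto *b = ctx.getPointerType(ctx.getUIntType(32),
//                                      spv::StorageClass::Function);
//   assert(a == b); // same cached instance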
const HybridPointerType *SpirvContext::getPointerType(QualType pointee,
spv::StorageClass sc) {
const HybridPointerType *result = new (this) HybridPointerType(pointee, sc);
hybridPointerTypes.push_back(result);
return result;
}
FunctionType *
SpirvContext::getFunctionType(const SpirvType *ret,
llvm::ArrayRef<const SpirvType *> param) {
// Create a temporary object for finding in the set.
FunctionType type(ret, param);
auto found = functionTypes.find(&type);
if (found != functionTypes.end())
return *found;
auto inserted = functionTypes.insert(new (this) FunctionType(ret, param));
return *inserted.first;
}
const StructType *SpirvContext::getByteAddressBufferType(bool isWritable) {
// Create a uint RuntimeArray.
const auto *raType =
getRuntimeArrayType(getUIntType(32), /* ArrayStride */ 4);
// Create a struct containing the runtime array as its only member.
return getStructType({StructType::FieldInfo(raType, /*fieldIndex*/ 0,
/*name*/ "", /*offset*/ 0)},
isWritable ? "type.RWByteAddressBuffer"
: "type.ByteAddressBuffer",
!isWritable, StructInterfaceType::StorageBuffer);
}
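// A sketch of the SPIR-V this corresponds to (names illustrative):
//
//   %uint_ra = OpTypeRuntimeArray %uint              ; ArrayStride 4
//   %type.ByteAddressBuffer = OpTypeStruct %uint_ra  ; storage-buffer block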
const StructType *SpirvContext::getACSBufferCounterType() {
// Create int32.
const auto *int32Type = getSIntType(32);
// Create a struct containing the integer counter as its only member.
const StructType *type =
getStructType({StructType::FieldInfo(int32Type, /*fieldIndex*/ 0,
"counter", /*offset*/ 0)},
"type.ACSBuffer.counter",
/*isReadOnly*/ false, StructInterfaceType::StorageBuffer);
return type;
}
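// A sketch of the SPIR-V this corresponds to (names illustrative):
//
//   %type.ACSBuffer.counter = OpTypeStruct %int  ; member "counter" at offset 0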
SpirvDebugType *SpirvContext::getDebugTypeBasic(const SpirvType *spirvType,
llvm::StringRef name,
SpirvConstant *size,
uint32_t encoding) {
// Reuse existing debug type if possible.
if (debugTypes.find(spirvType) != debugTypes.end())
return debugTypes[spirvType];
auto *debugType = new (this) SpirvDebugTypeBasic(name, size, encoding);
debugTypes[spirvType] = debugType;
return debugType;
}
SpirvDebugType *
SpirvContext::getDebugTypeMember(llvm::StringRef name, SpirvDebugType *type,
SpirvDebugSource *source, uint32_t line,
uint32_t column, SpirvDebugInstruction *parent,
uint32_t flags, uint32_t offsetInBits,
uint32_t sizeInBits, const APValue *value) {
// NOTE: Do not look this up in debugTypes: members of different composites
// may share the same spirvType while having different parents (i.e., the
// enclosing type composite).
SpirvDebugTypeMember *debugType =
new (this) SpirvDebugTypeMember(name, type, source, line, column, parent,
flags, offsetInBits, sizeInBits, value);
return debugType;
}
SpirvDebugTypeComposite *SpirvContext::getDebugTypeComposite(
const SpirvType *spirvType, llvm::StringRef name, SpirvDebugSource *source,
uint32_t line, uint32_t column, SpirvDebugInstruction *parent,
llvm::StringRef linkageName, uint32_t flags, uint32_t tag) {
// Reuse existing debug type if possible.
auto it = debugTypes.find(spirvType);
if (it != debugTypes.end()) {
assert(it->second != nullptr && isa<SpirvDebugTypeComposite>(it->second));
return dyn_cast<SpirvDebugTypeComposite>(it->second);
}
auto *debugType = new (this) SpirvDebugTypeComposite(
name, source, line, column, parent, linkageName, flags, tag);
debugType->setDebugSpirvType(spirvType);
debugTypes[spirvType] = debugType;
return debugType;
}
SpirvDebugType *SpirvContext::getDebugType(const SpirvType *spirvType) {
auto it = debugTypes.find(spirvType);
if (it != debugTypes.end())
return it->second;
return nullptr;
}
SpirvDebugType *
SpirvContext::getDebugTypeArray(const SpirvType *spirvType,
SpirvDebugInstruction *elemType,
llvm::ArrayRef<uint32_t> elemCount) {
// Reuse existing debug type if possible.
if (debugTypes.find(spirvType) != debugTypes.end())
return debugTypes[spirvType];
auto *eTy = dyn_cast<SpirvDebugType>(elemType);
assert(eTy && "Element type must be a SpirvDebugType.");
auto *debugType = new (this) SpirvDebugTypeArray(eTy, elemCount);
debugTypes[spirvType] = debugType;
return debugType;
}
SpirvDebugType *
SpirvContext::getDebugTypeVector(const SpirvType *spirvType,
SpirvDebugInstruction *elemType,
uint32_t elemCount) {
// Reuse existing debug type if possible.
if (debugTypes.find(spirvType) != debugTypes.end())
return debugTypes[spirvType];
auto *eTy = dyn_cast<SpirvDebugType>(elemType);
assert(eTy && "Element type must be a SpirvDebugType.");
auto *debugType = new (this) SpirvDebugTypeVector(eTy, elemCount);
debugTypes[spirvType] = debugType;
return debugType;
}
SpirvDebugType *
SpirvContext::getDebugTypeMatrix(const SpirvType *spirvType,
SpirvDebugInstruction *vectorType,
uint32_t vectorCount) {
// Reuse existing debug type if possible.
if (debugTypes.find(spirvType) != debugTypes.end())
return debugTypes[spirvType];
auto *eTy = dyn_cast<SpirvDebugTypeVector>(vectorType);
assert(eTy && "Element type must be a SpirvDebugTypeVector.");
auto *debugType = new (this) SpirvDebugTypeMatrix(eTy, vectorCount);
debugTypes[spirvType] = debugType;
return debugType;
}
SpirvDebugType *
SpirvContext::getDebugTypeFunction(const SpirvType *spirvType, uint32_t flags,
SpirvDebugType *ret,
llvm::ArrayRef<SpirvDebugType *> params) {
// Reuse existing debug type if possible.
if (debugTypes.find(spirvType) != debugTypes.end())
return debugTypes[spirvType];
auto *debugType = new (this) SpirvDebugTypeFunction(flags, ret, params);
debugTypes[spirvType] = debugType;
return debugType;
}
SpirvDebugTypeTemplate *SpirvContext::createDebugTypeTemplate(
const ClassTemplateSpecializationDecl *templateType,
SpirvDebugInstruction *target,
const llvm::SmallVector<SpirvDebugTypeTemplateParameter *, 2> ¶ms) {
auto *tempTy = getDebugTypeTemplate(templateType);
if (tempTy != nullptr)
return tempTy;
tempTy = new (this) SpirvDebugTypeTemplate(target, params);
typeTemplates[templateType] = tempTy;
return tempTy;
}
SpirvDebugTypeTemplate *SpirvContext::getDebugTypeTemplate(
const ClassTemplateSpecializationDecl *templateType) {
auto it = typeTemplates.find(templateType);
if (it != typeTemplates.end())
return it->second;
return nullptr;
}
SpirvDebugTypeTemplateParameter *SpirvContext::createDebugTypeTemplateParameter(
const TemplateArgument *templateArg, llvm::StringRef name,
SpirvDebugType *type, SpirvInstruction *value, SpirvDebugSource *source,
uint32_t line, uint32_t column) {
auto *param = getDebugTypeTemplateParameter(templateArg);
if (param != nullptr)
return param;
param = new (this)
SpirvDebugTypeTemplateParameter(name, type, value, source, line, column);
typeTemplateParams[templateArg] = param;
return param;
}
SpirvDebugTypeTemplateParameter *SpirvContext::getDebugTypeTemplateParameter(
const TemplateArgument *templateArg) {
auto it = typeTemplateParams.find(templateArg);
if (it != typeTemplateParams.end())
return it->second;
return nullptr;
}
void SpirvContext::pushDebugLexicalScope(RichDebugInfo *info,
SpirvDebugInstruction *scope) {
assert((isa<SpirvDebugLexicalBlock>(scope) ||
isa<SpirvDebugFunction>(scope) ||
isa<SpirvDebugCompilationUnit>(scope) ||
isa<SpirvDebugTypeComposite>(scope)) &&
"Given scope is not a lexical scope");
currentLexicalScope = scope;
info->scopeStack.push_back(scope);
}
void SpirvContext::moveDebugTypesToModule(SpirvModule *module) {
for (const auto &typePair : debugTypes) {
module->addDebugInfo(typePair.second);
if (auto *composite = dyn_cast<SpirvDebugTypeComposite>(typePair.second)) {
for (auto *member : composite->getMembers()) {
module->addDebugInfo(member);
}
}
}
for (const auto &typePair : typeTemplates) {
module->addDebugInfo(typePair.second);
}
for (const auto &typePair : typeTemplateParams) {
module->addDebugInfo(typePair.second);
}
debugTypes.clear();
typeTemplates.clear();
typeTemplateParams.clear();
}
const SpirvIntrinsicType *SpirvContext::getOrCreateSpirvIntrinsicType(
unsigned typeId, unsigned typeOpCode,
llvm::ArrayRef<SpvIntrinsicTypeOperand> operands) {
if (spirvIntrinsicTypesById[typeId] == nullptr) {
spirvIntrinsicTypesById[typeId] =
new (this) SpirvIntrinsicType(typeOpCode, operands);
}
return spirvIntrinsicTypesById[typeId];
}
const SpirvIntrinsicType *SpirvContext::getOrCreateSpirvIntrinsicType(
unsigned typeOpCode, llvm::ArrayRef<SpvIntrinsicTypeOperand> operands) {
SpirvIntrinsicType type(typeOpCode, operands);
auto found =
std::find_if(spirvIntrinsicTypes.begin(), spirvIntrinsicTypes.end(),
[&type](const SpirvIntrinsicType *cachedType) {
return type == *cachedType;
});
if (found != spirvIntrinsicTypes.end())
return *found;
spirvIntrinsicTypes.push_back(new (this)
SpirvIntrinsicType(typeOpCode, operands));
return spirvIntrinsicTypes.back();
}
SpirvIntrinsicType *
SpirvContext::getCreatedSpirvIntrinsicType(unsigned typeId) {
if (spirvIntrinsicTypesById.find(typeId) == spirvIntrinsicTypesById.end()) {
return nullptr;
}
return spirvIntrinsicTypesById[typeId];
}
} // end namespace spirv
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/ConstEvaluator.h | //===-------- ConstEvaluator.h ----- Translate Constants --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//
//
// This file defines methods for translating AST expressions to SPIR-V
// constants.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SPIRV_CONSTEVALUATOR_H
#define LLVM_CLANG_SPIRV_CONSTEVALUATOR_H
#include "clang/AST/ASTContext.h"
#include "clang/SPIRV/SpirvBuilder.h"
#include "llvm/ADT/APInt.h"
namespace clang {
namespace spirv {
class ConstEvaluator {
public:
ConstEvaluator(ASTContext &astContext, SpirvBuilder &spvBuilder)
: astContext(astContext), spvBuilder(spvBuilder) {}
/// Translates the given frontend APInt into its SPIR-V equivalent for the
/// given targetType.
SpirvConstant *translateAPInt(const llvm::APInt &intValue,
const QualType targetType,
bool isSpecConstantMode);
/// Translates the given frontend APFloat into its SPIR-V equivalent for the
/// given targetType.
SpirvConstant *translateAPFloat(llvm::APFloat floatValue, QualType targetType,
bool isSpecConstantMode);
/// Translates the given frontend APValue into its SPIR-V equivalent for the
/// given targetType.
SpirvConstant *translateAPValue(const APValue &value,
const QualType targetType,
bool isSpecConstantMode);
/// Tries to evaluate the given APInt as a 32-bit integer. If the evaluation
/// can be performed without loss, it returns the <result-id> of the SPIR-V
/// constant for that value.
SpirvConstant *tryToEvaluateAsInt32(const llvm::APInt &intValue,
bool isSigned);
/// Tries to evaluate the given APFloat as a 32-bit float. If the evaluation
/// can be performed without loss, it returns the <result-id> of the SPIR-V
/// constant for that value.
SpirvConstant *tryToEvaluateAsFloat32(const llvm::APFloat &floatValue,
bool isSpecConstantMode);
/// Tries to evaluate the given Expr as a constant and returns the <result-id>
/// if success. Otherwise, returns 0.
SpirvConstant *tryToEvaluateAsConst(const Expr *expr,
bool isSpecConstantMode);
private:
/// Emits error to the diagnostic engine associated with the AST context.
template <unsigned N>
DiagnosticBuilder emitError(const char (&message)[N],
SourceLocation srcLoc = {}) {
const auto diagId = astContext.getDiagnostics().getCustomDiagID(
clang::DiagnosticsEngine::Error, message);
return astContext.getDiagnostics().Report(srcLoc, diagId);
}
ASTContext &astContext;
SpirvBuilder &spvBuilder;
};
} // namespace spirv
} // namespace clang
#endif // LLVM_CLANG_SPIRV_CONSTEVALUATOR_H
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/SignaturePackingUtil.cpp | //===--- SignaturePackingUtil.cpp - Util functions impl ----------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "SignaturePackingUtil.h"
#include <algorithm>
namespace clang {
namespace spirv {
namespace {
const uint32_t kNumComponentsInFullyUsedLocation = 4;
/// A class for managing stage input/output packed locations to avoid duplicate
/// uses of the same location and component.
class PackedLocationAndComponentSet {
public:
PackedLocationAndComponentSet(SpirvBuilder &spirvBuilder,
llvm::function_ref<uint32_t(uint32_t)> nextLocs)
: spvBuilder(spirvBuilder), assignLocs(nextLocs) {}
bool assignLocAndComponent(const StageVar *var) {
if (tryReuseLocations(var)) {
return true;
}
return assignNewLocations(var);
}
private:
// When the stage variable |var| needs M components in each of N locations,
// checks whether N consecutive already-used locations have M unused component
// slots. If such locations exist, reuses those locations and components to
// pack |var|.
//
// For example, a stage variable `float3 foo[2]` needs 3 components in 2
// locations. Assuming that we already assigned the following locations and
// components to other stage variables:
//
// Used components / nextUnusedComponent
// Location 0: 0 / 1 2
// Location 1: 0 / 1 / 2 3
// Location 2: 0 / 1 2
// Location 3: 0 1
// Location 4: 0 1
// Location 5: 0 / 1 / 2 / 3 4 (full)
//
// we can assign Location 3 and Component 1 to `float3 foo[2]` because
// Location 3 and 4 have 3 unused Component slots (1, 2, 3).
bool tryReuseLocations(const StageVar *var) {
auto requiredLocsAndComponents = var->getLocationAndComponentCount();
for (size_t startLoc = 0; startLoc < nextUnusedComponent.size();
startLoc++) {
uint32_t firstUnusedComponent = 0;
// Check whether |requiredLocsAndComponents.location| locations starting
// from |startLoc| have |requiredLocsAndComponents.component| unused
// components. Note that if the sum of the required and already-used
// component slots exceeds 4, we cannot use that location because a
// location provides at most 4 components.
for (uint32_t i = 0; i < requiredLocsAndComponents.location; ++i) {
if (startLoc + i >= nextUnusedComponent.size() ||
nextUnusedComponent[startLoc + i] +
requiredLocsAndComponents.component >
kNumComponentsInFullyUsedLocation) {
firstUnusedComponent = kNumComponentsInFullyUsedLocation;
break;
}
firstUnusedComponent =
std::max(firstUnusedComponent, nextUnusedComponent[startLoc + i]);
}
if (firstUnusedComponent != kNumComponentsInFullyUsedLocation) {
// Based on Vulkan spec "15.1.5. Component Assignment", a scalar or
// two-component 64-bit data type must not specify a Component
// decoration of 1 or 3.
if (requiredLocsAndComponents.componentAlignment) {
reuseLocations(var, startLoc, 2);
} else {
reuseLocations(var, startLoc, firstUnusedComponent);
}
return true;
}
}
return false;
}
// Creates OpDecorate instructions for |var| with Location |startLoc| and
// Component |componentStart|. Marks used component slots.
void reuseLocations(const StageVar *var, uint32_t startLoc,
uint32_t componentStart) {
auto requiredLocsAndComponents = var->getLocationAndComponentCount();
spvBuilder.decorateLocation(var->getSpirvInstr(), assignedLocs[startLoc]);
spvBuilder.decorateComponent(var->getSpirvInstr(), componentStart);
for (uint32_t i = 0; i < requiredLocsAndComponents.location; ++i) {
nextUnusedComponent[startLoc + i] =
componentStart + requiredLocsAndComponents.component;
}
}
// Assigns newly allocated locations to |var| and records the component usage
// for each of them.
bool assignNewLocations(const StageVar *var) {
auto requiredLocsAndComponents = var->getLocationAndComponentCount();
uint32_t loc = assignLocs(requiredLocsAndComponents.location);
spvBuilder.decorateLocation(var->getSpirvInstr(), loc);
uint32_t componentCount = requiredLocsAndComponents.component;
for (uint32_t i = 0; i < requiredLocsAndComponents.location; ++i) {
assignedLocs.push_back(loc + i);
nextUnusedComponent.push_back(componentCount);
}
return true;
}
private:
SpirvBuilder &spvBuilder;
///< A function to assign a new location number.
llvm::function_ref<uint32_t(uint32_t)> assignLocs;
///< A vector of assigned locations.
llvm::SmallVector<uint32_t, 8> assignedLocs;
///< A vector to keep the starting unused component number in each assigned
///< location.
llvm::SmallVector<uint32_t, 8> nextUnusedComponent;
};
} // anonymous namespace
bool packSignatureInternal(
const std::vector<const StageVar *> &vars,
llvm::function_ref<bool(const StageVar *)> assignLocAndComponent,
bool forInput, bool forPCF) {
for (const auto *var : vars) {
auto sigPointKind = var->getSigPoint()->GetKind();
// HS has two types of outputs, one from the shader itself and another from
// the patch constant function. They have HSCPOut and PCOut SigPointKinds,
// respectively. Since we do not know which one comes first at this moment,
// we handle PCOut first. Likewise, DS has DSIn and DSCPIn as its inputs. We
// handle DSIn first.
if (forPCF) {
if (sigPointKind != hlsl::SigPoint::Kind::PCOut &&
sigPointKind != hlsl::SigPoint::Kind::DSIn) {
continue;
}
} else {
if (sigPointKind == hlsl::SigPoint::Kind::PCOut ||
sigPointKind == hlsl::SigPoint::Kind::DSIn) {
continue;
}
}
if (!assignLocAndComponent(var)) {
return false;
}
}
return true;
}
bool packSignature(SpirvBuilder &spvBuilder,
const std::vector<const StageVar *> &vars,
llvm::function_ref<uint32_t(uint32_t)> nextLocs,
bool forInput) {
PackedLocationAndComponentSet packedLocSet(spvBuilder, nextLocs);
auto assignLocationAndComponent = [&packedLocSet](const StageVar *var) {
return packedLocSet.assignLocAndComponent(var);
};
return packSignatureInternal(vars, assignLocationAndComponent, forInput,
true) &&
packSignatureInternal(vars, assignLocationAndComponent, forInput,
false);
}
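// Usage sketch (spvBuilder and stageVars are assumed; a simple bump allocator
// is one way to implement nextLocs):
//
//   uint32_t nextLoc = 0;
//   auto nextLocs = [&nextLoc](uint32_t count) {
//     uint32_t loc = nextLoc;
//     nextLoc += count;
//     return loc;
//   };
//   bool ok = packSignature(spvBuilder, stageVars, nextLocs, /*forInput=*/true);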
} // end namespace spirv
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/CapabilityVisitor.cpp | //===--- CapabilityVisitor.cpp - Capability Visitor --------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "CapabilityVisitor.h"
#include "clang/SPIRV/SpirvBuilder.h"
namespace clang {
namespace spirv {
void CapabilityVisitor::addExtension(Extension ext, llvm::StringRef target,
SourceLocation loc) {
// Do not emit OpExtension if the given extension is natively supported in
// the target environment.
if (!featureManager.isExtensionRequiredForTargetEnv(ext))
return;
if (featureManager.requestExtension(ext, target, loc))
spvBuilder.requireExtension(featureManager.getExtensionName(ext), loc);
}
bool CapabilityVisitor::addExtensionAndCapabilitiesIfEnabled(
Extension ext, llvm::ArrayRef<spv::Capability> capabilities) {
if (!featureManager.isExtensionEnabled(ext)) {
return false;
}
addExtension(ext, "", {});
for (auto cap : capabilities) {
addCapability(cap);
}
return true;
}
void CapabilityVisitor::addCapability(spv::Capability cap, SourceLocation loc) {
if (cap != spv::Capability::Max) {
spvBuilder.requireCapability(cap, loc);
}
}
void CapabilityVisitor::addCapabilityForType(const SpirvType *type,
SourceLocation loc,
spv::StorageClass sc) {
// Defend against instructions that do not have a return type.
if (!type)
return;
// Integer-related capabilities
if (const auto *intType = dyn_cast<IntegerType>(type)) {
switch (intType->getBitwidth()) {
case 8: {
addCapability(spv::Capability::Int8);
break;
}
case 16: {
// Usage of a 16-bit integer type.
addCapability(spv::Capability::Int16);
// Usage of a 16-bit integer type as stage I/O.
if (sc == spv::StorageClass::Input || sc == spv::StorageClass::Output) {
addExtension(Extension::KHR_16bit_storage, "16-bit stage IO variables",
loc);
addCapability(spv::Capability::StorageInputOutput16);
}
break;
}
case 64: {
addCapability(spv::Capability::Int64);
break;
}
default:
break;
}
}
// Float-related capabilities
else if (const auto *floatType = dyn_cast<FloatType>(type)) {
switch (floatType->getBitwidth()) {
case 16: {
// Usage of a 16-bit float type.
addCapability(spv::Capability::Float16);
// Usage of a 16-bit float type as stage I/O.
if (sc == spv::StorageClass::Input || sc == spv::StorageClass::Output) {
addExtension(Extension::KHR_16bit_storage, "16-bit stage IO variables",
loc);
addCapability(spv::Capability::StorageInputOutput16);
}
break;
}
case 64: {
addCapability(spv::Capability::Float64);
break;
}
default:
break;
}
}
// Vectors
else if (const auto *vecType = dyn_cast<VectorType>(type)) {
addCapabilityForType(vecType->getElementType(), loc, sc);
}
// Matrices
else if (const auto *matType = dyn_cast<MatrixType>(type)) {
addCapabilityForType(matType->getElementType(), loc, sc);
}
// Arrays
else if (const auto *arrType = dyn_cast<ArrayType>(type)) {
addCapabilityForType(arrType->getElementType(), loc, sc);
}
// Runtime array of resources requires additional capability.
else if (const auto *raType = dyn_cast<RuntimeArrayType>(type)) {
if (SpirvType::isResourceType(raType->getElementType())) {
// the elements inside the runtime array are resources
addExtension(Extension::EXT_descriptor_indexing,
"runtime array of resources", loc);
addCapability(spv::Capability::RuntimeDescriptorArrayEXT);
}
addCapabilityForType(raType->getElementType(), loc, sc);
}
// Image types
else if (const auto *imageType = dyn_cast<ImageType>(type)) {
switch (imageType->getDimension()) {
case spv::Dim::Buffer: {
addCapability(spv::Capability::SampledBuffer);
if (imageType->withSampler() == ImageType::WithSampler::No) {
addCapability(spv::Capability::ImageBuffer);
}
break;
}
case spv::Dim::Dim1D: {
if (imageType->withSampler() == ImageType::WithSampler::No) {
addCapability(spv::Capability::Image1D);
} else {
addCapability(spv::Capability::Sampled1D);
}
break;
}
case spv::Dim::SubpassData: {
addCapability(spv::Capability::InputAttachment);
break;
}
default:
break;
}
switch (imageType->getImageFormat()) {
case spv::ImageFormat::Rg32f:
case spv::ImageFormat::Rg16f:
case spv::ImageFormat::R11fG11fB10f:
case spv::ImageFormat::R16f:
case spv::ImageFormat::Rgba16:
case spv::ImageFormat::Rgb10A2:
case spv::ImageFormat::Rg16:
case spv::ImageFormat::Rg8:
case spv::ImageFormat::R16:
case spv::ImageFormat::R8:
case spv::ImageFormat::Rgba16Snorm:
case spv::ImageFormat::Rg16Snorm:
case spv::ImageFormat::Rg8Snorm:
case spv::ImageFormat::R16Snorm:
case spv::ImageFormat::R8Snorm:
case spv::ImageFormat::Rg32i:
case spv::ImageFormat::Rg16i:
case spv::ImageFormat::Rg8i:
case spv::ImageFormat::R16i:
case spv::ImageFormat::R8i:
case spv::ImageFormat::Rgb10a2ui:
case spv::ImageFormat::Rg32ui:
case spv::ImageFormat::Rg16ui:
case spv::ImageFormat::Rg8ui:
case spv::ImageFormat::R16ui:
case spv::ImageFormat::R8ui:
addCapability(spv::Capability::StorageImageExtendedFormats);
break;
default:
// Only image formats requiring extended formats are relevant. The rest
// just pass through.
break;
}
if (const auto *sampledType = imageType->getSampledType()) {
addCapabilityForType(sampledType, loc, sc);
if (const auto *sampledIntType = dyn_cast<IntegerType>(sampledType)) {
if (sampledIntType->getBitwidth() == 64) {
addCapability(spv::Capability::Int64ImageEXT);
addExtension(Extension::EXT_shader_image_int64,
"64-bit image types in resource", loc);
}
}
}
}
// Sampled image type
else if (const auto *sampledImageType = dyn_cast<SampledImageType>(type)) {
addCapabilityForType(sampledImageType->getImageType(), loc, sc);
}
// Pointer type
else if (const auto *ptrType = dyn_cast<SpirvPointerType>(type)) {
addCapabilityForType(ptrType->getPointeeType(), loc, sc);
if (sc == spv::StorageClass::PhysicalStorageBuffer) {
addExtension(Extension::KHR_physical_storage_buffer,
"SPV_KHR_physical_storage_buffer", loc);
addCapability(spv::Capability::PhysicalStorageBufferAddresses);
}
}
// Struct type
else if (const auto *structType = dyn_cast<StructType>(type)) {
if (SpirvType::isOrContainsType<NumericalType, 16>(structType)) {
addExtension(Extension::KHR_16bit_storage, "16-bit types in resource",
loc);
if (sc == spv::StorageClass::PushConstant) {
addCapability(spv::Capability::StoragePushConstant16);
} else if (structType->getInterfaceType() ==
StructInterfaceType::UniformBuffer) {
addCapability(spv::Capability::StorageUniform16);
} else if (structType->getInterfaceType() ==
StructInterfaceType::StorageBuffer) {
addCapability(spv::Capability::StorageUniformBufferBlock16);
}
}
for (auto field : structType->getFields())
addCapabilityForType(field.type, loc, sc);
}
}
bool CapabilityVisitor::visit(SpirvDecoration *decor) {
const auto loc = decor->getSourceLocation();
switch (decor->getDecoration()) {
case spv::Decoration::Sample: {
addCapability(spv::Capability::SampleRateShading, loc);
break;
}
case spv::Decoration::NonUniformEXT: {
addExtension(Extension::EXT_descriptor_indexing, "NonUniformEXT", loc);
addCapability(spv::Capability::ShaderNonUniformEXT);
break;
}
case spv::Decoration::HlslSemanticGOOGLE:
case spv::Decoration::HlslCounterBufferGOOGLE: {
addExtension(Extension::GOOGLE_hlsl_functionality1, "SPIR-V reflection",
loc);
break;
}
case spv::Decoration::PerVertexKHR: {
addExtension(Extension::KHR_fragment_shader_barycentric, "PerVertexKHR",
loc);
addCapability(spv::Capability::FragmentBarycentricKHR);
break;
}
// Capabilities needed for built-ins
case spv::Decoration::BuiltIn: {
AddVulkanMemoryModelForVolatile(decor, loc);
assert(decor->getParams().size() == 1);
const auto builtin = static_cast<spv::BuiltIn>(decor->getParams()[0]);
switch (builtin) {
case spv::BuiltIn::SampleId:
case spv::BuiltIn::SamplePosition: {
addCapability(spv::Capability::SampleRateShading, loc);
break;
}
case spv::BuiltIn::SubgroupSize:
case spv::BuiltIn::NumSubgroups:
case spv::BuiltIn::SubgroupId:
case spv::BuiltIn::SubgroupLocalInvocationId: {
addCapability(spv::Capability::GroupNonUniform, loc);
break;
}
case spv::BuiltIn::BaseVertex: {
addExtension(Extension::KHR_shader_draw_parameters, "BaseVertex Builtin",
loc);
addCapability(spv::Capability::DrawParameters);
break;
}
case spv::BuiltIn::BaseInstance: {
addExtension(Extension::KHR_shader_draw_parameters,
"BaseInstance Builtin", loc);
addCapability(spv::Capability::DrawParameters);
break;
}
case spv::BuiltIn::DrawIndex: {
addExtension(Extension::KHR_shader_draw_parameters, "DrawIndex Builtin",
loc);
addCapability(spv::Capability::DrawParameters);
break;
}
case spv::BuiltIn::DeviceIndex: {
addExtension(Extension::KHR_device_group, "DeviceIndex Builtin", loc);
addCapability(spv::Capability::DeviceGroup);
break;
}
case spv::BuiltIn::FragStencilRefEXT: {
addExtension(Extension::EXT_shader_stencil_export, "SV_StencilRef", loc);
addCapability(spv::Capability::StencilExportEXT);
break;
}
case spv::BuiltIn::ViewIndex: {
addExtension(Extension::KHR_multiview, "SV_ViewID", loc);
addCapability(spv::Capability::MultiView);
break;
}
case spv::BuiltIn::FullyCoveredEXT: {
addExtension(Extension::EXT_fragment_fully_covered, "SV_InnerCoverage",
loc);
addCapability(spv::Capability::FragmentFullyCoveredEXT);
break;
}
case spv::BuiltIn::PrimitiveId: {
// PrimitiveID can be used as PSIn or MSPOut.
if (shaderModel == spv::ExecutionModel::Fragment ||
shaderModel == spv::ExecutionModel::MeshNV ||
shaderModel == spv::ExecutionModel::MeshEXT)
addCapability(spv::Capability::Geometry);
break;
}
case spv::BuiltIn::Layer: {
if (shaderModel == spv::ExecutionModel::Vertex ||
shaderModel == spv::ExecutionModel::TessellationControl ||
shaderModel == spv::ExecutionModel::TessellationEvaluation) {
if (featureManager.isTargetEnvVulkan1p2OrAbove()) {
addCapability(spv::Capability::ShaderLayer);
} else {
addExtension(Extension::EXT_shader_viewport_index_layer,
"SV_RenderTargetArrayIndex", loc);
addCapability(spv::Capability::ShaderViewportIndexLayerEXT);
}
} else if (shaderModel == spv::ExecutionModel::Fragment ||
shaderModel == spv::ExecutionModel::MeshNV ||
shaderModel == spv::ExecutionModel::MeshEXT) {
// SV_RenderTargetArrayIndex can be used as PSIn or MSPOut.
addCapability(spv::Capability::Geometry);
}
break;
}
case spv::BuiltIn::ViewportIndex: {
if (shaderModel == spv::ExecutionModel::Vertex ||
shaderModel == spv::ExecutionModel::TessellationControl ||
shaderModel == spv::ExecutionModel::TessellationEvaluation) {
if (featureManager.isTargetEnvVulkan1p2OrAbove()) {
addCapability(spv::Capability::ShaderViewportIndex);
} else {
addExtension(Extension::EXT_shader_viewport_index_layer,
"SV_ViewPortArrayIndex", loc);
addCapability(spv::Capability::ShaderViewportIndexLayerEXT);
}
} else if (shaderModel == spv::ExecutionModel::Fragment ||
shaderModel == spv::ExecutionModel::Geometry ||
shaderModel == spv::ExecutionModel::MeshNV ||
shaderModel == spv::ExecutionModel::MeshEXT) {
// SV_ViewportArrayIndex can be used as PSIn or GSOut or MSPOut.
addCapability(spv::Capability::MultiViewport);
}
break;
}
case spv::BuiltIn::ClipDistance: {
addCapability(spv::Capability::ClipDistance);
break;
}
case spv::BuiltIn::CullDistance: {
addCapability(spv::Capability::CullDistance);
break;
}
case spv::BuiltIn::BaryCoordKHR:
case spv::BuiltIn::BaryCoordNoPerspKHR: {
// SV_Barycentrics maps to only these two builtins, but it is still allowed
// to decorate them with interpolation qualifiers such as centroid or
// sample.
addExtension(Extension::KHR_fragment_shader_barycentric,
"SV_Barycentrics", loc);
addCapability(spv::Capability::FragmentBarycentricKHR);
break;
}
case spv::BuiltIn::ShadingRateKHR:
case spv::BuiltIn::PrimitiveShadingRateKHR: {
addExtension(Extension::KHR_fragment_shading_rate, "SV_ShadingRate", loc);
addCapability(spv::Capability::FragmentShadingRateKHR);
break;
}
default:
break;
}
break;
}
default:
break;
}
return true;
}
spv::Capability
CapabilityVisitor::getNonUniformCapability(const SpirvType *type) {
if (!type)
return spv::Capability::Max;
if (const auto *arrayType = dyn_cast<ArrayType>(type)) {
return getNonUniformCapability(arrayType->getElementType());
}
if (SpirvType::isTexture(type) || SpirvType::isSampler(type)) {
return spv::Capability::SampledImageArrayNonUniformIndexingEXT;
}
if (SpirvType::isRWTexture(type)) {
return spv::Capability::StorageImageArrayNonUniformIndexingEXT;
}
if (SpirvType::isBuffer(type)) {
return spv::Capability::UniformTexelBufferArrayNonUniformIndexingEXT;
}
if (SpirvType::isRWBuffer(type)) {
return spv::Capability::StorageTexelBufferArrayNonUniformIndexingEXT;
}
if (SpirvType::isSubpassInput(type) || SpirvType::isSubpassInputMS(type)) {
return spv::Capability::InputAttachmentArrayNonUniformIndexingEXT;
}
return spv::Capability::Max;
}
bool CapabilityVisitor::visit(SpirvImageQuery *instr) {
addCapabilityForType(instr->getResultType(), instr->getSourceLocation(),
instr->getStorageClass());
addCapability(spv::Capability::ImageQuery);
return true;
}
bool CapabilityVisitor::visit(SpirvImageSparseTexelsResident *instr) {
addCapabilityForType(instr->getResultType(), instr->getSourceLocation(),
instr->getStorageClass());
addCapability(spv::Capability::ImageGatherExtended);
addCapability(spv::Capability::SparseResidency);
return true;
}
namespace {
bool isImageOpOnUnknownFormat(const SpirvImageOp *instruction) {
if (!instruction->getImage() || !instruction->getImage()->getResultType()) {
return false;
}
const ImageType *imageType =
dyn_cast<ImageType>(instruction->getImage()->getResultType());
if (!imageType || imageType->getImageFormat() != spv::ImageFormat::Unknown) {
return false;
}
// At this point, the image type is known to have an Unknown format.
return true;
}
} // namespace
bool CapabilityVisitor::visit(SpirvImageOp *instr) {
addCapabilityForType(instr->getResultType(), instr->getSourceLocation(),
instr->getStorageClass());
if (instr->hasOffset() || instr->hasConstOffsets())
addCapability(spv::Capability::ImageGatherExtended);
if (instr->isSparse())
addCapability(spv::Capability::SparseResidency);
if (isImageOpOnUnknownFormat(instr)) {
addCapability(instr->isImageWrite()
? spv::Capability::StorageImageWriteWithoutFormat
: spv::Capability::StorageImageReadWithoutFormat);
}
return true;
}
bool CapabilityVisitor::visitInstruction(SpirvInstruction *instr) {
const SpirvType *resultType = instr->getResultType();
const auto opcode = instr->getopcode();
const auto loc = instr->getSourceLocation();
// Add result-type-specific capabilities
addCapabilityForType(resultType, loc, instr->getStorageClass());
// Add NonUniform capabilities if necessary
if (instr->isNonUniform()) {
addExtension(Extension::EXT_descriptor_indexing, "NonUniformEXT", loc);
addCapability(spv::Capability::ShaderNonUniformEXT);
addCapability(getNonUniformCapability(resultType));
}
if (instr->getKind() == SpirvInstruction::IK_SpirvIntrinsicInstruction) {
SpirvIntrinsicInstruction *pSpvInst =
dyn_cast<SpirvIntrinsicInstruction>(instr);
for (auto &cap : pSpvInst->getCapabilities()) {
addCapability(static_cast<spv::Capability>(cap));
}
for (const auto &ext : pSpvInst->getExtensions()) {
spvBuilder.requireExtension(ext, loc);
}
}
// Add opcode-specific capabilities
switch (opcode) {
case spv::Op::OpDPdxCoarse:
case spv::Op::OpDPdyCoarse:
case spv::Op::OpFwidthCoarse:
case spv::Op::OpDPdxFine:
case spv::Op::OpDPdyFine:
case spv::Op::OpFwidthFine:
addCapability(spv::Capability::DerivativeControl);
break;
case spv::Op::OpGroupNonUniformElect:
addCapability(spv::Capability::GroupNonUniform);
break;
case spv::Op::OpGroupNonUniformAny:
case spv::Op::OpGroupNonUniformAll:
case spv::Op::OpGroupNonUniformAllEqual:
addCapability(spv::Capability::GroupNonUniformVote);
break;
case spv::Op::OpGroupNonUniformBallot:
case spv::Op::OpGroupNonUniformInverseBallot:
case spv::Op::OpGroupNonUniformBallotBitExtract:
case spv::Op::OpGroupNonUniformBallotBitCount:
case spv::Op::OpGroupNonUniformBallotFindLSB:
case spv::Op::OpGroupNonUniformBallotFindMSB:
case spv::Op::OpGroupNonUniformBroadcast:
case spv::Op::OpGroupNonUniformBroadcastFirst:
addCapability(spv::Capability::GroupNonUniformBallot);
break;
case spv::Op::OpGroupNonUniformShuffle:
case spv::Op::OpGroupNonUniformShuffleXor:
addCapability(spv::Capability::GroupNonUniformShuffle);
break;
case spv::Op::OpGroupNonUniformIAdd:
case spv::Op::OpGroupNonUniformFAdd:
case spv::Op::OpGroupNonUniformIMul:
case spv::Op::OpGroupNonUniformFMul:
case spv::Op::OpGroupNonUniformSMax:
case spv::Op::OpGroupNonUniformUMax:
case spv::Op::OpGroupNonUniformFMax:
case spv::Op::OpGroupNonUniformSMin:
case spv::Op::OpGroupNonUniformUMin:
case spv::Op::OpGroupNonUniformFMin:
case spv::Op::OpGroupNonUniformBitwiseAnd:
case spv::Op::OpGroupNonUniformBitwiseOr:
case spv::Op::OpGroupNonUniformBitwiseXor:
case spv::Op::OpGroupNonUniformLogicalAnd:
case spv::Op::OpGroupNonUniformLogicalOr:
case spv::Op::OpGroupNonUniformLogicalXor:
addCapability(spv::Capability::GroupNonUniformArithmetic);
break;
case spv::Op::OpGroupNonUniformQuadBroadcast:
case spv::Op::OpGroupNonUniformQuadSwap:
addCapability(spv::Capability::GroupNonUniformQuad);
break;
case spv::Op::OpVariable: {
if (spvOptions.enableReflect &&
!cast<SpirvVariable>(instr)->getHlslUserType().empty()) {
addExtension(Extension::GOOGLE_user_type, "HLSL User Type", loc);
addExtension(Extension::GOOGLE_hlsl_functionality1, "HLSL User Type",
loc);
}
break;
}
case spv::Op::OpRayQueryInitializeKHR: {
auto rayQueryInst = dyn_cast<SpirvRayQueryOpKHR>(instr);
if (rayQueryInst && rayQueryInst->hasCullFlags()) {
addCapability(spv::Capability::RayTraversalPrimitiveCullingKHR);
}
break;
}
case spv::Op::OpReportIntersectionKHR:
case spv::Op::OpIgnoreIntersectionKHR:
case spv::Op::OpTerminateRayKHR:
case spv::Op::OpTraceRayKHR:
case spv::Op::OpExecuteCallableKHR: {
if (featureManager.isExtensionEnabled(Extension::NV_ray_tracing)) {
addCapability(spv::Capability::RayTracingNV);
addExtension(Extension::NV_ray_tracing, "SPV_NV_ray_tracing", {});
} else {
// KHR_ray_tracing extension requires Vulkan 1.1 with VK_KHR_spirv_1_4
// extension or Vulkan 1.2.
featureManager.requestTargetEnv(SPV_ENV_VULKAN_1_1_SPIRV_1_4,
"Raytracing", {});
addCapability(spv::Capability::RayTracingKHR);
addExtension(Extension::KHR_ray_tracing, "SPV_KHR_ray_tracing", {});
}
break;
}
case spv::Op::OpSetMeshOutputsEXT:
case spv::Op::OpEmitMeshTasksEXT: {
if (featureManager.isExtensionEnabled(Extension::EXT_mesh_shader)) {
featureManager.requestTargetEnv(SPV_ENV_UNIVERSAL_1_4, "MeshShader", {});
addCapability(spv::Capability::MeshShadingEXT);
addExtension(Extension::EXT_mesh_shader, "SPV_EXT_mesh_shader", {});
}
break;
}
default:
break;
}
return true;
}
bool CapabilityVisitor::visit(SpirvEntryPoint *entryPoint) {
shaderModel = entryPoint->getExecModel();
switch (shaderModel) {
case spv::ExecutionModel::Fragment:
case spv::ExecutionModel::Vertex:
case spv::ExecutionModel::GLCompute:
addCapability(spv::Capability::Shader);
break;
case spv::ExecutionModel::Geometry:
addCapability(spv::Capability::Geometry);
break;
case spv::ExecutionModel::TessellationControl:
case spv::ExecutionModel::TessellationEvaluation:
addCapability(spv::Capability::Tessellation);
break;
case spv::ExecutionModel::RayGenerationNV:
case spv::ExecutionModel::IntersectionNV:
case spv::ExecutionModel::ClosestHitNV:
case spv::ExecutionModel::AnyHitNV:
case spv::ExecutionModel::MissNV:
case spv::ExecutionModel::CallableNV:
if (featureManager.isExtensionEnabled(Extension::NV_ray_tracing)) {
addCapability(spv::Capability::RayTracingNV);
addExtension(Extension::NV_ray_tracing, "SPV_NV_ray_tracing", {});
} else {
// KHR_ray_tracing extension requires Vulkan 1.1 with VK_KHR_spirv_1_4
// extension or Vulkan 1.2.
featureManager.requestTargetEnv(SPV_ENV_VULKAN_1_1_SPIRV_1_4,
"Raytracing", {});
addCapability(spv::Capability::RayTracingKHR);
addExtension(Extension::KHR_ray_tracing, "SPV_KHR_ray_tracing", {});
}
break;
case spv::ExecutionModel::MeshNV:
case spv::ExecutionModel::TaskNV:
addCapability(spv::Capability::MeshShadingNV);
addExtension(Extension::NV_mesh_shader, "SPV_NV_mesh_shader", {});
break;
case spv::ExecutionModel::MeshEXT:
case spv::ExecutionModel::TaskEXT:
addCapability(spv::Capability::MeshShadingEXT);
addExtension(Extension::EXT_mesh_shader, "SPV_EXT_mesh_shader", {});
break;
default:
llvm_unreachable("found unknown shader model");
break;
}
return true;
}
bool CapabilityVisitor::visit(SpirvExecutionMode *execMode) {
spv::ExecutionMode executionMode = execMode->getExecutionMode();
SourceLocation execModeSourceLocation = execMode->getSourceLocation();
SourceLocation entryPointSourceLocation =
execMode->getEntryPoint()->getSourceLocation();
switch (executionMode) {
case spv::ExecutionMode::PostDepthCoverage:
addCapability(spv::Capability::SampleMaskPostDepthCoverage,
entryPointSourceLocation);
addExtension(Extension::KHR_post_depth_coverage,
"[[vk::post_depth_coverage]]", execModeSourceLocation);
break;
case spv::ExecutionMode::EarlyAndLateFragmentTestsAMD:
addExtension(Extension::AMD_shader_early_and_late_fragment_tests,
"[[vk::early_and_late_tests]]", execModeSourceLocation);
break;
case spv::ExecutionMode::StencilRefUnchangedFrontAMD:
addCapability(spv::Capability::StencilExportEXT, entryPointSourceLocation);
addExtension(Extension::AMD_shader_early_and_late_fragment_tests,
"[[vk::stencil_ref_unchanged_front]]", execModeSourceLocation);
addExtension(Extension::EXT_shader_stencil_export,
"[[vk::stencil_ref_unchanged_front]]", execModeSourceLocation);
break;
case spv::ExecutionMode::StencilRefGreaterFrontAMD:
addCapability(spv::Capability::StencilExportEXT, entryPointSourceLocation);
addExtension(Extension::AMD_shader_early_and_late_fragment_tests,
"[[vk::stencil_ref_greater_equal_front]]",
execModeSourceLocation);
addExtension(Extension::EXT_shader_stencil_export,
"[[vk::stencil_ref_greater_equal_front]]",
execModeSourceLocation);
break;
case spv::ExecutionMode::StencilRefLessFrontAMD:
addCapability(spv::Capability::StencilExportEXT, entryPointSourceLocation);
addExtension(Extension::AMD_shader_early_and_late_fragment_tests,
"[[vk::stencil_ref_less_equal_front]]",
execModeSourceLocation);
addExtension(Extension::EXT_shader_stencil_export,
"[[vk::stencil_ref_less_equal_front]]",
execModeSourceLocation);
break;
case spv::ExecutionMode::StencilRefUnchangedBackAMD:
addCapability(spv::Capability::StencilExportEXT, entryPointSourceLocation);
addExtension(Extension::AMD_shader_early_and_late_fragment_tests,
"[[vk::stencil_ref_unchanged_back]]", execModeSourceLocation);
addExtension(Extension::EXT_shader_stencil_export,
"[[vk::stencil_ref_unchanged_back]]", execModeSourceLocation);
break;
case spv::ExecutionMode::StencilRefGreaterBackAMD:
addCapability(spv::Capability::StencilExportEXT, entryPointSourceLocation);
addExtension(Extension::AMD_shader_early_and_late_fragment_tests,
"[[vk::stencil_ref_greater_equal_back]]",
execModeSourceLocation);
addExtension(Extension::EXT_shader_stencil_export,
"[[vk::stencil_ref_greater_equal_back]]",
execModeSourceLocation);
break;
case spv::ExecutionMode::StencilRefLessBackAMD:
addCapability(spv::Capability::StencilExportEXT, entryPointSourceLocation);
addExtension(Extension::AMD_shader_early_and_late_fragment_tests,
"[[vk::stencil_ref_less_equal_back]]", execModeSourceLocation);
addExtension(Extension::EXT_shader_stencil_export,
"[[vk::stencil_ref_less_equal_back]]", execModeSourceLocation);
break;
case spv::ExecutionMode::MaximallyReconvergesKHR:
addExtension(Extension::KHR_maximal_reconvergence, "",
execModeSourceLocation);
break;
case spv::ExecutionMode::DenormPreserve:
case spv::ExecutionMode::DenormFlushToZero:
// KHR_float_controls was promoted to core in Vulkan 1.2.
if (!featureManager.isTargetEnvVulkan1p2OrAbove()) {
addExtension(Extension::KHR_float_controls, "SPV_KHR_float_controls",
execModeSourceLocation);
}
addCapability(executionMode == spv::ExecutionMode::DenormPreserve
? spv::Capability::DenormPreserve
: spv::Capability::DenormFlushToZero,
execModeSourceLocation);
break;
default:
break;
}
return true;
}
bool CapabilityVisitor::visit(SpirvExtInstImport *instr) {
if (instr->getExtendedInstSetName() == "NonSemantic.DebugPrintf") {
addExtension(Extension::KHR_non_semantic_info, "DebugPrintf",
/*SourceLocation*/ {});
} else if (instr->getExtendedInstSetName() ==
"NonSemantic.Shader.DebugInfo.100") {
addExtension(Extension::KHR_non_semantic_info, "Shader.DebugInfo.100",
/*SourceLocation*/ {});
}
return true;
}
bool CapabilityVisitor::visit(SpirvAtomic *instr) {
if (instr->hasValue() && SpirvType::isOrContainsType<IntegerType, 64>(
instr->getValue()->getResultType())) {
addCapability(spv::Capability::Int64Atomics, instr->getSourceLocation());
}
return true;
}
bool CapabilityVisitor::visit(SpirvDemoteToHelperInvocation *inst) {
addCapability(spv::Capability::DemoteToHelperInvocation,
inst->getSourceLocation());
if (!featureManager.isTargetEnvVulkan1p3OrAbove()) {
addExtension(Extension::EXT_demote_to_helper_invocation, "discard",
inst->getSourceLocation());
}
return true;
}
bool CapabilityVisitor::IsShaderModelForRayTracing() {
switch (shaderModel) {
case spv::ExecutionModel::RayGenerationKHR:
case spv::ExecutionModel::ClosestHitKHR:
case spv::ExecutionModel::MissKHR:
case spv::ExecutionModel::CallableKHR:
case spv::ExecutionModel::IntersectionKHR:
return true;
default:
return false;
}
}
void CapabilityVisitor::AddVulkanMemoryModelForVolatile(SpirvDecoration *decor,
SourceLocation loc) {
// For Vulkan 1.3 or above, we can simply add the Volatile decoration. We do
// not need the VulkanMemoryModel capability.
if (featureManager.isTargetEnvVulkan1p3OrAbove()) {
return;
}
const auto builtin = static_cast<spv::BuiltIn>(decor->getParams()[0]);
bool enableVkMemoryModel = false;
switch (builtin) {
case spv::BuiltIn::SubgroupSize:
case spv::BuiltIn::SubgroupLocalInvocationId:
case spv::BuiltIn::SMIDNV:
case spv::BuiltIn::WarpIDNV:
case spv::BuiltIn::SubgroupEqMask:
case spv::BuiltIn::SubgroupGeMask:
case spv::BuiltIn::SubgroupGtMask:
case spv::BuiltIn::SubgroupLeMask:
case spv::BuiltIn::SubgroupLtMask: {
if (IsShaderModelForRayTracing()) {
enableVkMemoryModel = true;
}
break;
}
case spv::BuiltIn::RayTmaxKHR: {
if (shaderModel == spv::ExecutionModel::IntersectionKHR) {
enableVkMemoryModel = true;
}
break;
}
default:
break;
}
if (enableVkMemoryModel) {
// VulkanMemoryModel was promoted to core in Vulkan 1.2. For Vulkan 1.1 or
// earlier, we have to use the SPV_KHR_vulkan_memory_model extension.
if (!featureManager.isTargetEnvVulkan1p2OrAbove()) {
addExtension(Extension::KHR_vulkan_memory_model,
"Volatile builtin variable in raytracing", loc);
}
addCapability(spv::Capability::VulkanMemoryModel, loc);
}
}
bool CapabilityVisitor::visit(SpirvIsHelperInvocationEXT *inst) {
addCapability(spv::Capability::DemoteToHelperInvocation,
inst->getSourceLocation());
addExtension(Extension::EXT_demote_to_helper_invocation,
"[[vk::HelperInvocation]]", inst->getSourceLocation());
return true;
}
bool CapabilityVisitor::visit(SpirvReadClock *inst) {
auto loc = inst->getSourceLocation();
addCapabilityForType(inst->getResultType(), loc, inst->getStorageClass());
addCapability(spv::Capability::ShaderClockKHR, loc);
addExtension(Extension::KHR_shader_clock, "ReadClock", loc);
return true;
}
bool CapabilityVisitor::visit(SpirvModule *, Visitor::Phase phase) {
// If there are no entry-points in the module (hence shaderModel is not set),
// add the Linkage capability. This allows library shader models to use
// 'export' attribute on functions, and generate an "incomplete/partial"
// SPIR-V binary.
// ExecutionModel::Max means that no entry point exists, therefore we should
// add the Linkage capability.
if (phase == Visitor::Phase::Done &&
shaderModel == spv::ExecutionModel::Max) {
addCapability(spv::Capability::Shader);
addCapability(spv::Capability::Linkage);
}
// SPIRV-Tools now has a pass to trim superfluous capabilities. This means we
// can remove most capability-selection logic from here, and just add
// capabilities by default. SPIRV-Tools will clean those up. Note: this pass
// supports only some capabilities. This list should be expanded to match the
// supported capabilities.
addCapability(spv::Capability::MinLod);
addExtensionAndCapabilitiesIfEnabled(
Extension::EXT_fragment_shader_interlock,
{
spv::Capability::FragmentShaderSampleInterlockEXT,
spv::Capability::FragmentShaderPixelInterlockEXT,
spv::Capability::FragmentShaderShadingRateInterlockEXT,
});
addExtensionAndCapabilitiesIfEnabled(
Extension::NV_compute_shader_derivatives,
{
spv::Capability::ComputeDerivativeGroupQuadsNV,
spv::Capability::ComputeDerivativeGroupLinearNV,
});
// AccelerationStructureType or RayQueryType can be provided by both
// ray_tracing and ray_query extension. By default, we select ray_query to
// provide it. This is an arbitrary decision. If the user wants to avoid one
// of these extensions (e.g., due to lack of support), they can do so by
// providing the list of enabled extensions.
if (!addExtensionAndCapabilitiesIfEnabled(Extension::KHR_ray_query,
{spv::Capability::RayQueryKHR})) {
addExtensionAndCapabilitiesIfEnabled(Extension::KHR_ray_tracing,
{spv::Capability::RayTracingKHR});
}
addExtensionAndCapabilitiesIfEnabled(
Extension::NV_shader_subgroup_partitioned,
{spv::Capability::GroupNonUniformPartitionedNV});
return true;
}
} // end namespace spirv
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/LiteralTypeVisitor.h | //===--- LiteralTypeVisitor.h - Literal Type Visitor -------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_SPIRV_LITERALTYPEVISITOR_H
#define LLVM_CLANG_LIB_SPIRV_LITERALTYPEVISITOR_H
#include "clang/SPIRV/SpirvContext.h"
#include "clang/SPIRV/SpirvVisitor.h"
namespace clang {
namespace spirv {
class LiteralTypeVisitor : public Visitor {
public:
LiteralTypeVisitor(const ASTContext &ctx, SpirvContext &spvCtx,
const SpirvCodeGenOptions &opts)
: Visitor(opts, spvCtx), astContext(ctx), curFnAstReturnType({}) {}
bool visit(SpirvFunction *, Phase) override;
bool visit(SpirvVariable *) override;
bool visit(SpirvAtomic *) override;
bool visit(SpirvUnaryOp *) override;
bool visit(SpirvBinaryOp *) override;
bool visit(SpirvBitFieldInsert *) override;
bool visit(SpirvBitFieldExtract *) override;
bool visit(SpirvSelect *) override;
bool visit(SpirvVectorShuffle *) override;
bool visit(SpirvGroupNonUniformOp *) override;
bool visit(SpirvLoad *) override;
bool visit(SpirvStore *) override;
bool visit(SpirvConstantComposite *) override;
bool visit(SpirvCompositeConstruct *) override;
bool visit(SpirvCompositeExtract *) override;
bool visit(SpirvAccessChain *) override;
bool visit(SpirvExtInst *) override;
bool visit(SpirvReturn *) override;
bool visit(SpirvCompositeInsert *) override;
bool visit(SpirvImageOp *) override;
bool visit(SpirvSwitch *) override;
using Visitor::visit;
// Note: We currently don't do anything to deduce literal types for the
// following instructions:
//
// SpirvImageQuery
// SpirvImageTexelPointer
// SpirvSpecConstantBinaryOp
// SpirvSpecConstantUnaryOp
/// The "sink" visit function for all instructions.
///
/// By default, all other visit functions redirect to this visit function.
/// Override this visit function if you want to handle all instructions,
/// regardless of their polymorphism.
bool visitInstruction(SpirvInstruction *instr) override;
private:
/// If the given instruction's return type is a literal type and the given
/// 'newType' is not a literal type, and they are of the same kind (both
/// integer or both float), updates the instruction's result type to newType.
/// Does nothing otherwise.
void tryToUpdateInstLitType(SpirvInstruction *, QualType newType);
/// returns true if the given literal type can be deduced to the given
/// newType. In order for that to be true,
/// a) litType must be a literal type
/// b) litType and newType must be either scalar or vectors of the same size
/// c) they must have the same underlying type (both int or both float)
bool canDeduceTypeFromLitType(QualType litType, QualType newType);
bool updateTypeForCompositeMembers(
QualType compositeType, llvm::ArrayRef<SpirvInstruction *> constituents);
/// Returns true if the given constant integer instruction contains a value
/// that cannot fit in 32 bits.
bool isLiteralLargerThan32Bits(SpirvConstantInteger *);
private:
const ASTContext &astContext;
QualType curFnAstReturnType;
};
} // end namespace spirv
} // end namespace clang
#endif // LLVM_CLANG_LIB_SPIRV_LITERALTYPEVISITOR_H
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/NonUniformVisitor.h | //===--- NonUniformVisitor.h - NonUniform Visitor ----------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_SPIRV_NONUNIFORMVISITOR_H
#define LLVM_CLANG_LIB_SPIRV_NONUNIFORMVISITOR_H
#include "clang/SPIRV/FeatureManager.h"
#include "clang/SPIRV/SpirvContext.h"
#include "clang/SPIRV/SpirvVisitor.h"
namespace clang {
namespace spirv {
class SpirvBuilder;
/// Propagates the NonUniform decoration. According to the Vulkan Spec:
///
/// If an instruction loads from or stores to a resource (including atomics and
/// image instructions) and the resource descriptor being accessed is not
/// dynamically uniform, then the operand corresponding to that resource (e.g.
/// the pointer or sampled image operand) must be decorated with NonUniformEXT.
///
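/// For example (illustrative HLSL), indexing a resource array with
/// NonUniformResourceIndex makes the descriptor access non-dynamically
/// uniform, so the access must carry the NonUniform decoration:
///   MyTextures[NonUniformResourceIndex(i)].Sample(MySampler, uv);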
class NonUniformVisitor : public Visitor {
public:
NonUniformVisitor(SpirvContext &spvCtx, const SpirvCodeGenOptions &opts)
: Visitor(opts, spvCtx) {}
bool visit(SpirvLoad *) override;
bool visit(SpirvAccessChain *) override;
bool visit(SpirvUnaryOp *) override;
bool visit(SpirvBinaryOp *) override;
bool visit(SpirvSampledImage *) override;
bool visit(SpirvImageTexelPointer *) override;
bool visit(SpirvAtomic *) override;
using Visitor::visit;
/// The "sink" visit function for all instructions.
///
  /// By default, all other visit functions redirect to this visit function.
  /// Override this visit function if you want to handle all instructions,
  /// regardless of their polymorphism.
bool visitInstruction(SpirvInstruction *instr) override { return true; }
};
} // end namespace spirv
} // end namespace clang
#endif // LLVM_CLANG_LIB_SPIRV_NONUNIFORMVISITOR_H
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/LowerTypeVisitor.cpp | //===--- LowerTypeVisitor.cpp - AST type to SPIR-V type impl -----*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "LowerTypeVisitor.h"
#include "ConstEvaluator.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/HlslTypes.h"
#include "clang/SPIRV/AstTypeProbe.h"
#include "clang/SPIRV/SpirvFunction.h"
namespace clang {
namespace spirv {
namespace {
/// Returns the :packoffset() annotation on the given decl. Returns nullptr if
/// the decl does not have one.
hlsl::ConstantPacking *getPackOffset(const clang::NamedDecl *decl) {
for (auto *annotation : decl->getUnusualAnnotations())
if (auto *packing = llvm::dyn_cast<hlsl::ConstantPacking>(annotation))
return packing;
return nullptr;
}
/// Rounds the given value up to the given power of 2.
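/// For example, roundToPow2(13, 4) returns 16 and roundToPow2(16, 16)
/// returns 16.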
inline uint32_t roundToPow2(uint32_t val, uint32_t pow2) {
assert(pow2 != 0);
return (val + pow2 - 1) & ~(pow2 - 1);
}
} // end anonymous namespace
// This method sorts a field list in the following order:
// - fields with register annotation first, sorted by register index.
// - then fields without annotation, in order of declaration.
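// For example (illustrative), fields {a : register(c2), b, c : register(c0)}
// are ordered as {c, a, b}: c and a sorted by register index, then b in
// declaration order.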
static std::vector<const HybridStructType::FieldInfo *>
sortFields(llvm::ArrayRef<HybridStructType::FieldInfo> fields) {
std::vector<const HybridStructType::FieldInfo *> output;
output.resize(fields.size());
auto back_inserter = output.rbegin();
std::map<uint32_t, const HybridStructType::FieldInfo *> fixed_fields;
for (auto it = fields.rbegin(); it < fields.rend(); it++) {
if (it->registerC) {
fixed_fields.insert({it->registerC->RegisterNumber, &*it});
} else {
*back_inserter = &*it;
back_inserter++;
}
}
auto front_inserter = output.begin();
for (const auto &item : fixed_fields) {
*front_inserter = item.second;
front_inserter++;
}
return output;
}
static void setDefaultFieldSize(const AlignmentSizeCalculator &alignmentCalc,
const SpirvLayoutRule rule,
const HybridStructType::FieldInfo *currentField,
StructType::FieldInfo *field) {
const auto &fieldType = currentField->astType;
uint32_t memberAlignment = 0, memberSize = 0, stride = 0;
std::tie(memberAlignment, memberSize) = alignmentCalc.getAlignmentAndSize(
fieldType, rule, /*isRowMajor*/ llvm::None, &stride);
field->sizeInBytes = memberSize;
return;
}
// Correctly determine a field offset/size/padding depending on its neighbors
// and other rules.
static void
setDefaultFieldOffset(const AlignmentSizeCalculator &alignmentCalc,
const SpirvLayoutRule rule,
const uint32_t previousFieldEnd,
const HybridStructType::FieldInfo *currentField,
StructType::FieldInfo *field) {
const auto &fieldType = currentField->astType;
uint32_t memberAlignment = 0, memberSize = 0, stride = 0;
std::tie(memberAlignment, memberSize) = alignmentCalc.getAlignmentAndSize(
fieldType, rule, /*isRowMajor*/ llvm::None, &stride);
const uint32_t baseOffset = previousFieldEnd;
  // The next available location after laying out the previous members.
if (rule != SpirvLayoutRule::RelaxedGLSLStd140 &&
rule != SpirvLayoutRule::RelaxedGLSLStd430 &&
rule != SpirvLayoutRule::FxcCTBuffer) {
field->offset = roundToPow2(baseOffset, memberAlignment);
return;
}
uint32_t newOffset = previousFieldEnd;
alignmentCalc.alignUsingHLSLRelaxedLayout(fieldType, memberSize,
memberAlignment, &newOffset);
field->offset = newOffset;
}
bool LowerTypeVisitor::visit(SpirvFunction *fn, Phase phase) {
if (phase == Visitor::Phase::Done) {
// Lower the function return type.
const SpirvType *spirvReturnType =
lowerType(fn->getAstReturnType(), SpirvLayoutRule::Void,
/*isRowMajor*/ llvm::None,
/*SourceLocation*/ {});
fn->setReturnType(spirvReturnType);
// Lower the function parameter types.
auto params = fn->getParameters();
llvm::SmallVector<const SpirvType *, 4> spirvParamTypes;
for (auto *param : params) {
spirvParamTypes.push_back(param->getResultType());
}
fn->setFunctionType(
spvContext.getFunctionType(spirvReturnType, spirvParamTypes));
}
return true;
}
bool LowerTypeVisitor::visitInstruction(SpirvInstruction *instr) {
if (spvContext.hasLoweredType(instr))
return true;
const QualType astType = instr->getAstResultType();
const SpirvType *hybridType = instr->getResultType();
// Lower QualType to SpirvType
if (astType != QualType({})) {
const SpirvType *spirvType =
lowerType(astType, instr->getLayoutRule(), /*isRowMajor*/ llvm::None,
instr->getSourceLocation());
instr->setResultType(spirvType);
}
// Lower Hybrid type to SpirvType
else if (hybridType) {
const SpirvType *spirvType = lowerType(hybridType, instr->getLayoutRule(),
instr->getSourceLocation());
instr->setResultType(spirvType);
}
// Lower QualType of DebugLocalVariable or DebugGlobalVariable to SpirvType.
  // Since a debug local/global variable must have a debug type, SpirvEmitter
  // sets its QualType. Here we lower it to a SpirvType, and DebugTypeVisitor
  // will then lower the SpirvType to a debug type.
if (auto *debugInstruction = dyn_cast<SpirvDebugInstruction>(instr)) {
const QualType debugQualType = debugInstruction->getDebugQualType();
if (!debugQualType.isNull()) {
assert(isa<SpirvDebugLocalVariable>(debugInstruction) ||
isa<SpirvDebugGlobalVariable>(debugInstruction));
const SpirvType *spirvType =
lowerType(debugQualType, instr->getLayoutRule(),
/*isRowMajor*/ llvm::None, instr->getSourceLocation());
debugInstruction->setDebugSpirvType(spirvType);
} else if (const auto *debugSpirvType =
debugInstruction->getDebugSpirvType()) {
      // When it does not have a QualType, either the type is already lowered,
      // or it is a HybridStructType that we should lower.
assert(isa<SpirvDebugGlobalVariable>(debugInstruction));
if (isa<HybridType>(debugSpirvType)) {
const SpirvType *loweredSpirvType = lowerType(
debugSpirvType, instr->getLayoutRule(), instr->getSourceLocation());
debugInstruction->setDebugSpirvType(loweredSpirvType);
} else {
debugInstruction->setDebugSpirvType(debugSpirvType);
}
}
}
// Instruction-specific type updates
const auto *resultType = instr->getResultType();
switch (instr->getopcode()) {
case spv::Op::OpSampledImage: {
// Wrap the image type in sampled image type if necessary.
if (!isa<SampledImageType>(resultType)) {
assert(isa<ImageType>(resultType));
instr->setResultType(
spvContext.getSampledImageType(cast<ImageType>(resultType)));
}
break;
}
// Variables and function parameters must have a pointer type.
case spv::Op::OpFunctionParameter:
case spv::Op::OpVariable: {
if (auto *var = dyn_cast<SpirvVariable>(instr)) {
if (var->hasBinding() && var->getHlslUserType().empty()) {
var->setHlslUserType(getHlslResourceTypeName(var->getAstResultType()));
}
auto vkImgFeatures = spvContext.getVkImageFeaturesForSpirvVariable(var);
if (vkImgFeatures.format != spv::ImageFormat::Unknown) {
if (const auto *imageType = dyn_cast<ImageType>(resultType)) {
resultType = spvContext.getImageType(imageType, vkImgFeatures.format);
instr->setResultType(resultType);
} else if (const auto *arrayType = dyn_cast<ArrayType>(resultType)) {
if (const auto *imageType =
dyn_cast<ImageType>(arrayType->getElementType())) {
auto newImgType =
spvContext.getImageType(imageType, vkImgFeatures.format);
resultType = spvContext.getArrayType(newImgType,
arrayType->getElementCount(),
arrayType->getStride());
instr->setResultType(resultType);
}
}
}
}
const SpirvType *pointerType =
spvContext.getPointerType(resultType, instr->getStorageClass());
instr->setResultType(pointerType);
break;
}
// Access chains must have a pointer type. The storage class for the pointer
// is the same as the storage class of the access base.
case spv::Op::OpAccessChain: {
const auto *pointerType = spvContext.getPointerType(
resultType,
cast<SpirvAccessChain>(instr)->getBase()->getStorageClass());
instr->setResultType(pointerType);
break;
}
// OpImageTexelPointer's result type must be a pointer with image storage
// class.
case spv::Op::OpImageTexelPointer: {
const SpirvType *pointerType =
spvContext.getPointerType(resultType, spv::StorageClass::Image);
instr->setResultType(pointerType);
break;
}
// Sparse image operations return a sparse residency struct.
case spv::Op::OpImageSparseSampleImplicitLod:
case spv::Op::OpImageSparseSampleExplicitLod:
case spv::Op::OpImageSparseSampleDrefImplicitLod:
case spv::Op::OpImageSparseSampleDrefExplicitLod:
case spv::Op::OpImageSparseFetch:
case spv::Op::OpImageSparseGather:
case spv::Op::OpImageSparseDrefGather:
case spv::Op::OpImageSparseRead: {
const auto *uintType = spvContext.getUIntType(32);
const auto *sparseResidencyStruct = spvContext.getStructType(
{StructType::FieldInfo(uintType, /* fieldIndex*/ 0, "Residency.Code"),
StructType::FieldInfo(resultType, /* fieldIndex*/ 1, "Result.Type")},
"SparseResidencyStruct");
instr->setResultType(sparseResidencyStruct);
break;
}
case spv::Op::OpSwitch: {
SpirvSwitch *spirvSwitch = cast<SpirvSwitch>(instr);
// OpSwitch target literals must have the same type as the selector. Now
// that the selector's AST type has been lowered, update the literals if
// necessary.
const SpirvType *selectorType = spirvSwitch->getSelector()->getResultType();
// Selectors must have a type of OpTypeInt.
assert(selectorType->getKind() == SpirvType::TK_Integer);
uint32_t bitwidth = cast<IntegerType>(selectorType)->getBitwidth();
for (auto &target : spirvSwitch->getTargets()) {
if (target.first.getBitWidth() != bitwidth) {
target.first = target.first.sextOrTrunc(bitwidth);
}
}
break;
}
default:
break;
}
  // No further result-type adjustment is needed for this instruction.
return true;
}
const SpirvType *LowerTypeVisitor::lowerType(const SpirvType *type,
SpirvLayoutRule rule,
SourceLocation loc) {
if (const auto *hybridPointer = dyn_cast<HybridPointerType>(type)) {
const QualType pointeeType = hybridPointer->getPointeeType();
const SpirvType *pointeeSpirvType =
lowerType(pointeeType, rule, /*isRowMajor*/ llvm::None, loc);
return spvContext.getPointerType(pointeeSpirvType,
hybridPointer->getStorageClass());
} else if (const auto *hybridSampledImage =
dyn_cast<HybridSampledImageType>(type)) {
const QualType imageAstType = hybridSampledImage->getImageType();
const SpirvType *imageSpirvType =
lowerType(imageAstType, rule, /*isRowMajor*/ llvm::None, loc);
assert(isa<ImageType>(imageSpirvType));
return spvContext.getSampledImageType(cast<ImageType>(imageSpirvType));
} else if (const auto *hybridStruct = dyn_cast<HybridStructType>(type)) {
// lower all fields of the struct.
auto loweredFields =
populateLayoutInformation(hybridStruct->getFields(), rule);
const StructType *structType = spvContext.getStructType(
loweredFields, hybridStruct->getStructName(),
hybridStruct->isReadOnly(), hybridStruct->getInterfaceType());
if (const auto *decl = spvContext.getStructDeclForSpirvType(type))
spvContext.registerStructDeclForSpirvType(structType, decl);
return structType;
}
// Void, bool, int, float cannot be further lowered.
// Matrices cannot contain hybrid types. Only matrices of scalars are valid.
  // sampledType in image types can only be a numerical type.
// Sampler types cannot be further lowered.
// SampledImage types cannot be further lowered.
// FunctionType is not allowed to contain hybrid parameters or return type.
// StructType is not allowed to contain any hybrid types.
else if (isa<VoidType>(type) || isa<ScalarType>(type) ||
isa<MatrixType>(type) || isa<ImageType>(type) ||
isa<SamplerType>(type) || isa<SampledImageType>(type) ||
isa<FunctionType>(type) || isa<StructType>(type)) {
return type;
}
// Vectors could contain a hybrid type
else if (const auto *vecType = dyn_cast<VectorType>(type)) {
const auto *loweredElemType =
lowerType(vecType->getElementType(), rule, loc);
// If vector didn't contain any hybrid types, return itself.
if (vecType->getElementType() == loweredElemType)
return vecType;
return spvContext.getVectorType(loweredElemType,
vecType->getElementCount());
}
// Arrays could contain a hybrid type
else if (const auto *arrType = dyn_cast<ArrayType>(type)) {
const auto *loweredElemType =
lowerType(arrType->getElementType(), rule, loc);
// If array didn't contain any hybrid types, return itself.
if (arrType->getElementType() == loweredElemType)
return arrType;
return spvContext.getArrayType(loweredElemType, arrType->getElementCount(),
arrType->getStride());
}
// Runtime arrays could contain a hybrid type
else if (const auto *raType = dyn_cast<RuntimeArrayType>(type)) {
const auto *loweredElemType =
lowerType(raType->getElementType(), rule, loc);
// If runtime array didn't contain any hybrid types, return itself.
if (raType->getElementType() == loweredElemType)
return raType;
return spvContext.getRuntimeArrayType(loweredElemType, raType->getStride());
}
// Pointer types could point to a hybrid type.
else if (const auto *ptrType = dyn_cast<SpirvPointerType>(type)) {
const auto *loweredPointee =
lowerType(ptrType->getPointeeType(), rule, loc);
// If the pointer type didn't point to any hybrid type, return itself.
if (ptrType->getPointeeType() == loweredPointee)
return ptrType;
return spvContext.getPointerType(loweredPointee,
ptrType->getStorageClass());
}
llvm_unreachable("lowering of hybrid type not implemented");
}
const SpirvType *LowerTypeVisitor::lowerType(QualType type,
SpirvLayoutRule rule,
llvm::Optional<bool> isRowMajor,
SourceLocation srcLoc) {
const auto desugaredType = desugarType(type, &isRowMajor);
if (desugaredType != type) {
const auto *spvType = lowerType(desugaredType, rule, isRowMajor, srcLoc);
return spvType;
}
{ // Primitive types
QualType ty = {};
if (isScalarType(type, &ty)) {
if (const auto *builtinType = ty->getAs<BuiltinType>()) {
const bool use16Bit = getCodeGenOptions().enable16BitTypes;
// Cases sorted roughly according to frequency in source code
switch (builtinType->getKind()) {
// 32-bit types
case BuiltinType::Float:
// The HalfFloat AST type is just an alias for the Float AST type
// and is always 32-bit. The HLSL half keyword is translated to
// HalfFloat if -enable-16bit-types is false.
case BuiltinType::HalfFloat:
return spvContext.getFloatType(32);
case BuiltinType::Int:
return spvContext.getSIntType(32);
case BuiltinType::UInt:
case BuiltinType::ULong:
// The 'int8_t4_packed' and 'uint8_t4_packed' types are in fact 32-bit
// unsigned integers.
case BuiltinType::Int8_4Packed:
case BuiltinType::UInt8_4Packed:
return spvContext.getUIntType(32);
// void and bool
case BuiltinType::Void:
return spvContext.getVoidType();
case BuiltinType::Bool:
// According to the SPIR-V spec, there is no physical size or bit
// pattern defined for boolean type. Therefore an unsigned integer
// is used to represent booleans when layout is required.
if (rule == SpirvLayoutRule::Void)
return spvContext.getBoolType();
else
return spvContext.getUIntType(32);
// 64-bit types
case BuiltinType::Double:
return spvContext.getFloatType(64);
case BuiltinType::LongLong:
return spvContext.getSIntType(64);
case BuiltinType::ULongLong:
return spvContext.getUIntType(64);
// 16-bit types
// The Half AST type is always 16-bit. The HLSL half keyword is
// translated to Half if -enable-16bit-types is true.
case BuiltinType::Half:
return spvContext.getFloatType(16);
case BuiltinType::Short: // int16_t
return spvContext.getSIntType(16);
case BuiltinType::UShort: // uint16_t
return spvContext.getUIntType(16);
// 8-bit integer types
case BuiltinType::UChar:
case BuiltinType::Char_U:
return spvContext.getUIntType(8);
case BuiltinType::SChar:
case BuiltinType::Char_S:
return spvContext.getSIntType(8);
// Relaxed precision types
case BuiltinType::Min10Float:
case BuiltinType::Min16Float:
return spvContext.getFloatType(use16Bit ? 16 : 32);
case BuiltinType::Min12Int:
case BuiltinType::Min16Int:
return spvContext.getSIntType(use16Bit ? 16 : 32);
case BuiltinType::Min16UInt:
return spvContext.getUIntType(use16Bit ? 16 : 32);
// All literal types should have been lowered to concrete types before
// LowerTypeVisitor is invoked. However, if there are unused literals,
// they will still have 'literal' type when we get to this point. Use
// 32-bit width by default for these cases.
// Example:
// void main() { 1.0; 1; }
case BuiltinType::LitInt:
return type->isSignedIntegerType() ? spvContext.getSIntType(32)
: spvContext.getUIntType(32);
      case BuiltinType::LitFloat:
        return spvContext.getFloatType(32);
      default:
        emitError("primitive type %0 unimplemented", srcLoc)
            << builtinType->getTypeClassName();
        return spvContext.getVoidType();
      }
}
}
}
// AST vector/matrix types are TypedefType of TemplateSpecializationType. We
// handle them via HLSL type inspection functions.
// When the memory layout rule is FxcCTBuffer, typeNxM matrix with M > 1 and
// N == 1 consists of M vectors where each vector has a single element. Since
// SPIR-V does not have a vector with single element, we have to use an
// OpTypeArray with ArrayStride 16 instead of OpTypeVector. We have the same
// rule for column_major typeNxM and row_major typeMxN.
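  // For example, under FxcCTBuffer a row_major float1x3 is lowered to an
  // OpTypeArray of 3 floats with ArrayStride 16 rather than an OpTypeVector.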
if (rule == SpirvLayoutRule::FxcCTBuffer && hlsl::IsHLSLMatType(type)) {
uint32_t rowCount = 0, colCount = 0;
hlsl::GetHLSLMatRowColCount(type, rowCount, colCount);
if (!alignmentCalc.useRowMajor(isRowMajor, type))
std::swap(rowCount, colCount);
if (rowCount == 1) {
useArrayForMat1xN = true;
auto elemType = hlsl::GetHLSLMatElementType(type);
uint32_t stride = 0;
alignmentCalc.getAlignmentAndSize(type, rule, isRowMajor, &stride);
return spvContext.getArrayType(
lowerType(elemType, rule, isRowMajor, srcLoc), colCount, stride);
}
}
{ // Vector types
QualType elemType = {};
uint32_t elemCount = {};
if (isVectorType(type, &elemType, &elemCount))
return spvContext.getVectorType(
lowerType(elemType, rule, isRowMajor, srcLoc), elemCount);
}
{ // Matrix types
QualType elemType = {};
uint32_t rowCount = 0, colCount = 0;
if (isMxNMatrix(type, &elemType, &rowCount, &colCount)) {
const auto *vecType = spvContext.getVectorType(
lowerType(elemType, rule, isRowMajor, srcLoc), colCount);
// Non-float matrices are represented as an array of vectors.
if (!elemType->isFloatingType()) {
llvm::Optional<uint32_t> arrayStride = llvm::None;
// If there is a layout rule, we need array stride information.
if (rule != SpirvLayoutRule::Void) {
uint32_t stride = 0;
alignmentCalc.getAlignmentAndSize(type, rule, isRowMajor, &stride);
arrayStride = stride;
}
// This return type is ArrayType.
return spvContext.getArrayType(vecType, rowCount, arrayStride);
}
return spvContext.getMatrixType(vecType, rowCount);
}
}
// Struct type
if (const auto *structType = type->getAs<RecordType>()) {
const auto *decl = structType->getDecl();
// HLSL resource types are also represented as RecordType in the AST.
// (ClassTemplateSpecializationDecl is a subclass of CXXRecordDecl, which
// is then a subclass of RecordDecl.) So we need to check them before
// checking the general struct type.
if (const auto *spvType =
lowerResourceType(type, rule, isRowMajor, srcLoc)) {
spvContext.registerStructDeclForSpirvType(spvType, decl);
return spvType;
}
auto loweredFields = lowerStructFields(decl, rule);
const auto *spvStructType =
spvContext.getStructType(loweredFields, decl->getName());
spvContext.registerStructDeclForSpirvType(spvStructType, decl);
return spvStructType;
}
// Array type
if (const auto *arrayType = astContext.getAsArrayType(type)) {
const auto elemType = arrayType->getElementType();
// If layout rule is void, it means these resource types are used for
// declaring local resources. This should be lowered to a pointer to the
// array.
//
// The pointer points to the Uniform storage class, and the element type
// should have the corresponding layout.
bool isLocalStructuredOrByteBuffer =
isAKindOfStructuredOrByteBuffer(elemType) &&
rule == SpirvLayoutRule::Void;
SpirvLayoutRule elementLayoutRule =
(isLocalStructuredOrByteBuffer ? getCodeGenOptions().sBufferLayoutRule
: rule);
const SpirvType *loweredElemType =
lowerType(elemType, elementLayoutRule, isRowMajor, srcLoc);
llvm::Optional<uint32_t> arrayStride = llvm::None;
if (rule != SpirvLayoutRule::Void &&
// We won't have stride information for structured/byte buffers since
// they contain runtime arrays.
!isAKindOfStructuredOrByteBuffer(elemType) &&
!isConstantTextureBuffer(elemType)) {
uint32_t stride = 0;
alignmentCalc.getAlignmentAndSize(type, rule, isRowMajor, &stride);
arrayStride = stride;
}
const SpirvType *spirvArrayType = nullptr;
if (const auto *caType = astContext.getAsConstantArrayType(type)) {
const auto size = static_cast<uint32_t>(caType->getSize().getZExtValue());
spirvArrayType =
spvContext.getArrayType(loweredElemType, size, arrayStride);
} else {
assert(type->isIncompleteArrayType());
spirvArrayType =
spvContext.getRuntimeArrayType(loweredElemType, arrayStride);
}
if (isLocalStructuredOrByteBuffer) {
return spvContext.getPointerType(spirvArrayType,
spv::StorageClass::Uniform);
}
return spirvArrayType;
}
// Reference types
if (const auto *refType = type->getAs<ReferenceType>()) {
// Note: Pointer/reference types are disallowed in HLSL source code.
// Although developers cannot use them directly, they are generated into
// the AST by out/inout parameter modifiers in function signatures.
    // We already pass function arguments via pointers to temporary local
// variables. So it should be fine to drop the pointer type and treat it
// as the underlying pointee type here.
return lowerType(refType->getPointeeType(), rule, isRowMajor, srcLoc);
}
// Pointer types
if (const auto *ptrType = type->getAs<PointerType>()) {
// The this object in a struct member function is of pointer type.
return lowerType(ptrType->getPointeeType(), rule, isRowMajor, srcLoc);
}
// Enum types
if (isEnumType(type)) {
return spvContext.getSIntType(32);
}
// Templated types.
if (const auto *spec = type->getAs<TemplateSpecializationType>()) {
return lowerType(spec->desugar(), rule, isRowMajor, srcLoc);
}
if (const auto *spec = type->getAs<SubstTemplateTypeParmType>()) {
return lowerType(spec->desugar(), rule, isRowMajor, srcLoc);
}
emitError("lower type %0 unimplemented", srcLoc) << type->getTypeClassName();
type->dump();
  return nullptr;
}
QualType LowerTypeVisitor::createASTTypeFromTemplateName(TemplateName name) {
auto *decl = name.getAsTemplateDecl();
if (decl == nullptr) {
return QualType();
}
auto *classTemplateDecl = dyn_cast<ClassTemplateDecl>(decl);
if (classTemplateDecl == nullptr) {
return QualType();
}
TemplateParameterList *parameters =
classTemplateDecl->getTemplateParameters();
if (parameters->size() != 1) {
return QualType();
}
auto *parmDecl = dyn_cast<TemplateTypeParmDecl>(parameters->getParam(0));
if (parmDecl == nullptr) {
return QualType();
}
if (!parmDecl->hasDefaultArgument()) {
return QualType();
}
TemplateArgument *arg =
new (context) TemplateArgument(parmDecl->getDefaultArgument());
auto *specialized = ClassTemplateSpecializationDecl::Create(
astContext, TagDecl::TagKind::TTK_Class,
classTemplateDecl->getDeclContext(), classTemplateDecl->getLocStart(),
classTemplateDecl->getLocStart(), classTemplateDecl, /* Args */ arg,
/* NumArgs */ 1,
/* PrevDecl */ nullptr);
QualType type = astContext.getTypeDeclType(specialized);
return type;
}
bool LowerTypeVisitor::getVkIntegralConstantValue(QualType type,
SpirvConstant *&result,
SourceLocation srcLoc) {
auto *recordType = type->getAs<RecordType>();
if (!recordType)
return false;
if (!isTypeInVkNamespace(recordType))
return false;
if (recordType->getDecl()->getName() == "Literal") {
auto *specDecl =
dyn_cast<ClassTemplateSpecializationDecl>(recordType->getDecl());
assert(specDecl);
const TemplateArgumentList &args = specDecl->getTemplateArgs();
QualType constant = args[0].getAsType();
bool val = getVkIntegralConstantValue(constant, result, srcLoc);
if (val) {
result->setLiteral(true);
} else {
emitError("The template argument to vk::Literal must be a "
"vk::integral_constant",
srcLoc);
}
return true;
}
if (recordType->getDecl()->getName() != "integral_constant")
return false;
auto *specDecl =
dyn_cast<ClassTemplateSpecializationDecl>(recordType->getDecl());
assert(specDecl);
const TemplateArgumentList &args = specDecl->getTemplateArgs();
QualType constantType = args[0].getAsType();
llvm::APSInt value = args[1].getAsIntegral();
result = ConstEvaluator(astContext, spvBuilder)
.translateAPValue(APValue(value), constantType, false);
return true;
}
const SpirvType *LowerTypeVisitor::lowerInlineSpirvType(
llvm::StringRef name, unsigned int opcode,
const ClassTemplateSpecializationDecl *specDecl, SpirvLayoutRule rule,
llvm::Optional<bool> isRowMajor, SourceLocation srcLoc) {
assert(specDecl);
SmallVector<SpvIntrinsicTypeOperand, 4> operands;
// Lower each operand argument
size_t operandsIndex = 1;
if (name == "SpirvType")
operandsIndex = 3;
auto args = specDecl->getTemplateArgs()[operandsIndex].getPackAsArray();
for (TemplateArgument arg : args) {
switch (arg.getKind()) {
case TemplateArgument::ArgKind::Type: {
QualType typeArg = arg.getAsType();
SpirvConstant *constant = nullptr;
if (getVkIntegralConstantValue(typeArg, constant, srcLoc)) {
if (constant) {
visitInstruction(constant);
operands.emplace_back(constant);
}
} else {
operands.emplace_back(lowerType(typeArg, rule, isRowMajor, srcLoc));
}
break;
}
case TemplateArgument::ArgKind::Template: {
// Handle HLSL template types that allow the omission of < and >; for
// example, Texture2D
TemplateName templateName = arg.getAsTemplate();
QualType typeArg = createASTTypeFromTemplateName(templateName);
assert(!typeArg.isNull() &&
"Could not create HLSL type from template name");
operands.emplace_back(lowerType(typeArg, rule, isRowMajor, srcLoc));
break;
}
default:
emitError("template argument kind %0 unimplemented", srcLoc)
<< arg.getKind();
}
}
return spvContext.getOrCreateSpirvIntrinsicType(opcode, operands);
}
const SpirvType *LowerTypeVisitor::lowerVkTypeInVkNamespace(
QualType type, llvm::StringRef name, SpirvLayoutRule rule,
llvm::Optional<bool> isRowMajor, SourceLocation srcLoc) {
if (name == "SpirvType" || name == "SpirvOpaqueType") {
auto opcode = hlsl::GetHLSLResourceTemplateUInt(type);
auto *specDecl = dyn_cast<ClassTemplateSpecializationDecl>(
type->getAs<RecordType>()->getDecl());
return lowerInlineSpirvType(name, opcode, specDecl, rule, isRowMajor,
srcLoc);
}
if (name == "ext_type") {
auto typeId = hlsl::GetHLSLResourceTemplateUInt(type);
return spvContext.getCreatedSpirvIntrinsicType(typeId);
}
if (name == "ext_result_id") {
QualType realType = hlsl::GetHLSLResourceTemplateParamType(type);
return lowerType(realType, rule, llvm::None, srcLoc);
}
emitError("unknown type %0 in vk namespace", srcLoc) << type;
return nullptr;
}
const SpirvType *
LowerTypeVisitor::lowerResourceType(QualType type, SpirvLayoutRule rule,
llvm::Optional<bool> isRowMajor,
SourceLocation srcLoc) {
  // Resource types are represented like either a C struct or a C++ class in
  // the AST. Samplers are represented like a C struct, so isStructureType()
  // returns true for them; textures are represented like a C++ class, so
  // isClassType() returns true for them.
assert(type->isStructureOrClassType());
const auto *recordType = type->getAs<RecordType>();
assert(recordType);
const llvm::StringRef name = recordType->getDecl()->getName();
if (isTypeInVkNamespace(recordType)) {
return lowerVkTypeInVkNamespace(type, name, rule, isRowMajor, srcLoc);
}
  // TODO: avoid string comparison once hlsl::IsHLSLResourceType() does that.
  // Vulkan does not yet support true 16-bit float texture objects.
if (name == "Buffer" || name == "RWBuffer" || name == "Texture1D" ||
name == "Texture2D" || name == "Texture3D" || name == "TextureCube" ||
name == "Texture1DArray" || name == "Texture2DArray" ||
name == "Texture2DMS" || name == "Texture2DMSArray" ||
name == "TextureCubeArray" || name == "RWTexture1D" ||
name == "RWTexture2D" || name == "RWTexture3D" ||
name == "RWTexture1DArray" || name == "RWTexture2DArray") {
const auto sampledType = hlsl::GetHLSLResourceResultType(type);
const auto loweredType =
lowerType(getElementType(astContext, sampledType), rule,
/*isRowMajor*/ llvm::None, srcLoc);
if (const auto *floatType = dyn_cast<FloatType>(loweredType)) {
if (floatType->getBitwidth() == 16) {
emitError("16-bit texture types not yet supported with -spirv", srcLoc);
return nullptr;
}
}
}
{ // Texture types
spv::Dim dim = {};
bool isArray = {};
if ((dim = spv::Dim::Dim1D, isArray = false, name == "Texture1D") ||
(dim = spv::Dim::Dim2D, isArray = false, name == "Texture2D") ||
(dim = spv::Dim::Dim3D, isArray = false, name == "Texture3D") ||
(dim = spv::Dim::Cube, isArray = false, name == "TextureCube") ||
(dim = spv::Dim::Dim1D, isArray = true, name == "Texture1DArray") ||
(dim = spv::Dim::Dim2D, isArray = true, name == "Texture2DArray") ||
(dim = spv::Dim::Dim2D, isArray = false, name == "Texture2DMS") ||
(dim = spv::Dim::Dim2D, isArray = true, name == "Texture2DMSArray") ||
// There is no Texture3DArray
(dim = spv::Dim::Cube, isArray = true, name == "TextureCubeArray")) {
const bool isMS = (name == "Texture2DMS" || name == "Texture2DMSArray");
const auto sampledType = hlsl::GetHLSLResourceResultType(type);
auto loweredType =
lowerType(getElementType(astContext, sampledType), rule,
/*isRowMajor*/ llvm::None, srcLoc);
// Treat bool textures as uint for compatibility with OpTypeImage.
if (loweredType == spvContext.getBoolType()) {
loweredType = spvContext.getUIntType(32);
}
return spvContext.getImageType(
loweredType, dim, ImageType::WithDepth::Unknown, isArray, isMS,
ImageType::WithSampler::Yes, spv::ImageFormat::Unknown);
}
// There is no RWTexture3DArray
if ((dim = spv::Dim::Dim1D, isArray = false,
name == "RWTexture1D" || name == "RasterizerOrderedTexture1D") ||
(dim = spv::Dim::Dim2D, isArray = false,
name == "RWTexture2D" || name == "RasterizerOrderedTexture2D") ||
(dim = spv::Dim::Dim3D, isArray = false,
name == "RWTexture3D" || name == "RasterizerOrderedTexture3D") ||
(dim = spv::Dim::Dim1D, isArray = true,
name == "RWTexture1DArray" ||
name == "RasterizerOrderedTexture1DArray") ||
(dim = spv::Dim::Dim2D, isArray = true,
name == "RWTexture2DArray" ||
name == "RasterizerOrderedTexture2DArray")) {
const auto sampledType = hlsl::GetHLSLResourceResultType(type);
const auto format =
translateSampledTypeToImageFormat(sampledType, srcLoc);
return spvContext.getImageType(
lowerType(getElementType(astContext, sampledType), rule,
/*isRowMajor*/ llvm::None, srcLoc),
dim, ImageType::WithDepth::Unknown, isArray,
/*isMultiSampled=*/false, /*sampled=*/ImageType::WithSampler::No,
format);
}
}
// Sampler types
if (name == "SamplerState" || name == "SamplerComparisonState") {
return spvContext.getSamplerType();
}
if (name == "RaytracingAccelerationStructure") {
return spvContext.getAccelerationStructureTypeNV();
}
if (name == "RayQuery")
return spvContext.getRayQueryTypeKHR();
if (name == "StructuredBuffer" || name == "RWStructuredBuffer" ||
name == "RasterizerOrderedStructuredBuffer" ||
name == "AppendStructuredBuffer" || name == "ConsumeStructuredBuffer") {
    // StructuredBuffer<S> will be translated into an OpTypeStruct with one
    // field, which is an OpTypeRuntimeArray of OpTypeStruct (S).
    // If the layout rule is void, it means these resource types are used for
    // declaring local resources, which should be created as alias variables.
    // The aliased-to variable will be in the Uniform storage class, which has
    // layout decorations.
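    // Illustrative SPIR-V shape for StructuredBuffer<S>:
    //   %S      = OpTypeStruct ...        ; lowered S
    //   %ra     = OpTypeRuntimeArray %S   ; decorated with ArrayStride
    //   %result = OpTypeStruct %ra        ; the single-field wrapper struct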
bool asAlias = false;
if (rule == SpirvLayoutRule::Void) {
asAlias = true;
rule = getCodeGenOptions().sBufferLayoutRule;
}
// Get the underlying resource type.
const auto s = hlsl::GetHLSLResourceResultType(type);
// If the underlying type is a matrix, check majorness.
llvm::Optional<bool> isRowMajor = llvm::None;
if (isMxNMatrix(s))
isRowMajor = isRowMajorMatrix(spvOptions, type);
// Lower the underlying type.
const auto *structType = lowerType(s, rule, isRowMajor, srcLoc);
// Calculate memory alignment for the resource.
uint32_t arrayStride = 0;
QualType sArray = astContext.getConstantArrayType(
s, llvm::APInt(32, 1), clang::ArrayType::Normal, 0);
alignmentCalc.getAlignmentAndSize(sArray, rule, isRowMajor, &arrayStride);
// We have a runtime array of structures. So:
// The stride of the runtime array is the size of the struct.
const auto *raType =
spvContext.getRuntimeArrayType(structType, arrayStride);
const bool isReadOnly = (name == "StructuredBuffer");
// Attach matrix stride decorations if this is a *StructuredBuffer<matrix>.
llvm::Optional<uint32_t> matrixStride = llvm::None;
if (isMxNMatrix(s)) {
uint32_t stride = 0;
alignmentCalc.getAlignmentAndSize(s, rule, isRowMajor, &stride);
matrixStride = stride;
}
const std::string typeName = "type." + name.str() + "." + getAstTypeName(s);
const auto *valType = spvContext.getStructType(
{StructType::FieldInfo(raType, /* fieldIndex*/ 0, /*name*/ "",
/*offset*/ 0, matrixStride, isRowMajor)},
typeName, isReadOnly, StructInterfaceType::StorageBuffer);
if (asAlias) {
// All structured buffers are in the Uniform storage class.
return spvContext.getPointerType(valType, spv::StorageClass::Uniform);
}
return valType;
}
if (name == "ConstantBuffer" || name == "TextureBuffer") {
// ConstantBuffer<T> and TextureBuffer<T> are lowered as T
const bool forTBuffer = name == "TextureBuffer";
if (rule == SpirvLayoutRule::Void) {
rule = forTBuffer ? getCodeGenOptions().tBufferLayoutRule
: getCodeGenOptions().cBufferLayoutRule;
}
const auto *bufferType = type->getAs<RecordType>();
assert(bufferType);
const auto *bufferDecl = bufferType->getDecl();
// Get the underlying resource type.
const auto underlyingType = hlsl::GetHLSLResourceResultType(type);
const auto *underlyingStructType = underlyingType->getAs<RecordType>();
assert(underlyingStructType &&
"T in ConstantBuffer<T> or TextureBuffer<T> must be a struct type");
const auto *underlyingStructDecl = underlyingStructType->getDecl();
auto loweredFields = lowerStructFields(underlyingStructDecl, rule);
const std::string structName = "type." + bufferDecl->getName().str() + "." +
underlyingStructDecl->getName().str();
const auto *spvStructType = spvContext.getStructType(
loweredFields, structName, /*isReadOnly*/ forTBuffer,
forTBuffer ? StructInterfaceType::StorageBuffer
: StructInterfaceType::UniformBuffer);
spvContext.registerStructDeclForSpirvType(spvStructType, bufferDecl);
return spvStructType;
}
// ByteAddressBuffer and RWByteAddressBuffer types.
if (name == "ByteAddressBuffer" || name == "RWByteAddressBuffer" ||
name == "RasterizerOrderedByteAddressBuffer") {
const auto *bufferType = spvContext.getByteAddressBufferType(
/*isRW*/ name != "ByteAddressBuffer");
if (rule == SpirvLayoutRule::Void) {
// All byte address buffers are in the Uniform storage class.
return spvContext.getPointerType(bufferType, spv::StorageClass::Uniform);
}
return bufferType;
}
// Buffer and RWBuffer types
if (name == "Buffer" || name == "RWBuffer" ||
name == "RasterizerOrderedBuffer") {
const auto sampledType = hlsl::GetHLSLResourceResultType(type);
if (sampledType->isStructureType() &&
(name.startswith("RW") || name.startswith("RasterizerOrdered"))) {
// Note: actually fxc supports RWBuffer over struct types. However, the
// struct member must fit into a 4-component vector and writing to a
      // RWBuffer element must write all components. This feature is rarely
      // used by developers, so we just emit an error saying it is not
      // supported for now.
emitError("cannot instantiate %0 with struct type %1", srcLoc)
<< name << sampledType;
      return nullptr;
}
const auto format = translateSampledTypeToImageFormat(sampledType, srcLoc);
return spvContext.getImageType(
lowerType(getElementType(astContext, sampledType), rule,
/*isRowMajor*/ llvm::None, srcLoc),
spv::Dim::Buffer, ImageType::WithDepth::Unknown,
/*isArrayed=*/false, /*isMultiSampled=*/false,
/*sampled*/ name == "Buffer" ? ImageType::WithSampler::Yes
: ImageType::WithSampler::No,
format);
}
// InputPatch
if (name == "InputPatch") {
const auto elemType = hlsl::GetHLSLInputPatchElementType(type);
const auto elemCount = hlsl::GetHLSLInputPatchCount(type);
return spvContext.getArrayType(
lowerType(elemType, rule, /*isRowMajor*/ llvm::None, srcLoc), elemCount,
/*ArrayStride*/ llvm::None);
}
// OutputPatch
if (name == "OutputPatch") {
const auto elemType = hlsl::GetHLSLOutputPatchElementType(type);
const auto elemCount = hlsl::GetHLSLOutputPatchCount(type);
return spvContext.getArrayType(
lowerType(elemType, rule, /*isRowMajor*/ llvm::None, srcLoc), elemCount,
/*ArrayStride*/ llvm::None);
}
// Output stream objects (TriangleStream, LineStream, and PointStream)
if (name == "TriangleStream" || name == "LineStream" ||
name == "PointStream") {
return lowerType(hlsl::GetHLSLResourceResultType(type), rule,
/*isRowMajor*/ llvm::None, srcLoc);
}
if (name == "SubpassInput" || name == "SubpassInputMS") {
const auto sampledType = hlsl::GetHLSLResourceResultType(type);
return spvContext.getImageType(
lowerType(getElementType(astContext, sampledType), rule,
/*isRowMajor*/ llvm::None, srcLoc),
spv::Dim::SubpassData, ImageType::WithDepth::Unknown,
/*isArrayed=*/false,
        /*isMultiSampled=*/name == "SubpassInputMS",
ImageType::WithSampler::No, spv::ImageFormat::Unknown);
}
return nullptr;
}
llvm::SmallVector<StructType::FieldInfo, 4>
LowerTypeVisitor::lowerStructFields(const RecordDecl *decl,
SpirvLayoutRule rule) {
assert(decl);
// Collect all fields' information.
llvm::SmallVector<HybridStructType::FieldInfo, 8> fields;
// If this struct is derived from some other struct, place an implicit
// field at the very beginning for the base struct.
if (const auto *cxxDecl = dyn_cast<CXXRecordDecl>(decl)) {
for (const auto &base : cxxDecl->bases()) {
fields.push_back(HybridStructType::FieldInfo(base.getType()));
}
}
// Create fields for all members of this struct
for (const auto *field : decl->fields()) {
llvm::Optional<BitfieldInfo> bitfieldInfo;
if (field->isBitField()) {
bitfieldInfo = BitfieldInfo();
bitfieldInfo->sizeInBits =
field->getBitWidthValue(field->getASTContext());
}
if (field->hasAttrs()) {
for (auto &attr : field->getAttrs()) {
if (auto capAttr = dyn_cast<VKCapabilityExtAttr>(attr)) {
spvBuilder.requireCapability(
static_cast<spv::Capability>(capAttr->getCapability()),
capAttr->getLocation());
} else if (auto extAttr = dyn_cast<VKExtensionExtAttr>(attr)) {
spvBuilder.requireExtension(extAttr->getName(),
extAttr->getLocation());
}
}
}
fields.push_back(HybridStructType::FieldInfo(
field->getType(), field->getName(),
/*vkoffset*/ field->getAttr<VKOffsetAttr>(),
/*packoffset*/ getPackOffset(field),
/*RegisterAssignment*/ nullptr,
/*isPrecise*/ field->hasAttr<HLSLPreciseAttr>(),
/*bitfield*/ bitfieldInfo));
}
return populateLayoutInformation(fields, rule);
}
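// Maps a scalar or vector sampled type to a SPIR-V image format. For example,
// float4 maps to Rgba32f, int2 to Rg32i, and uint to R32ui; anything that
// cannot be mapped yields spv::ImageFormat::Unknown.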
spv::ImageFormat
LowerTypeVisitor::translateSampledTypeToImageFormat(QualType sampledType,
SourceLocation srcLoc) {
uint32_t elemCount = 1;
QualType ty = {};
if (!isScalarType(sampledType, &ty) &&
!isVectorType(sampledType, &ty, &elemCount) &&
!canFitIntoOneRegister(astContext, sampledType, &ty, &elemCount)) {
return spv::ImageFormat::Unknown;
}
const auto *builtinType = ty->getAs<BuiltinType>();
if (builtinType == nullptr) {
return spv::ImageFormat::Unknown;
}
switch (builtinType->getKind()) {
case BuiltinType::Int:
return elemCount == 1 ? spv::ImageFormat::R32i
: elemCount == 2 ? spv::ImageFormat::Rg32i
: elemCount == 4 ? spv::ImageFormat::Rgba32i
: spv::ImageFormat::Unknown;
case BuiltinType::Min12Int:
case BuiltinType::Min16Int:
return elemCount == 1 ? spv::ImageFormat::R16i
: elemCount == 2 ? spv::ImageFormat::Rg16i
: elemCount == 4 ? spv::ImageFormat::Rgba16i
: spv::ImageFormat::Unknown;
case BuiltinType::UInt:
return elemCount == 1 ? spv::ImageFormat::R32ui
: elemCount == 2 ? spv::ImageFormat::Rg32ui
: elemCount == 4 ? spv::ImageFormat::Rgba32ui
: spv::ImageFormat::Unknown;
case BuiltinType::Min16UInt:
return elemCount == 1 ? spv::ImageFormat::R16ui
: elemCount == 2 ? spv::ImageFormat::Rg16ui
: elemCount == 4 ? spv::ImageFormat::Rgba16ui
: spv::ImageFormat::Unknown;
case BuiltinType::Float:
return elemCount == 1 ? spv::ImageFormat::R32f
: elemCount == 2 ? spv::ImageFormat::Rg32f
: elemCount == 4 ? spv::ImageFormat::Rgba32f
: spv::ImageFormat::Unknown;
case BuiltinType::HalfFloat:
case BuiltinType::Min10Float:
case BuiltinType::Min16Float:
return elemCount == 1 ? spv::ImageFormat::R16f
: elemCount == 2 ? spv::ImageFormat::Rg16f
: elemCount == 4 ? spv::ImageFormat::Rgba16f
: spv::ImageFormat::Unknown;
case BuiltinType::LongLong:
return elemCount == 1 ? spv::ImageFormat::R64i : spv::ImageFormat::Unknown;
case BuiltinType::ULongLong:
return elemCount == 1 ? spv::ImageFormat::R64ui : spv::ImageFormat::Unknown;
default:
// Other sampled types unimplemented or irrelevant.
break;
}
return spv::ImageFormat::Unknown;
}
StructType::FieldInfo
LowerTypeVisitor::lowerField(const HybridStructType::FieldInfo *field,
SpirvLayoutRule rule, const uint32_t fieldIndex) {
auto fieldType = field->astType;
  // Lower the field type first. This call will populate proper matrix
  // majorness information.
StructType::FieldInfo loweredField(
lowerType(fieldType, rule, /*isRowMajor*/ llvm::None, {}), fieldIndex,
field->name);
// Set RelaxedPrecision information for the lowered field.
if (isRelaxedPrecisionType(fieldType, spvOptions)) {
loweredField.isRelaxedPrecision = true;
}
if (field->isPrecise) {
loweredField.isPrecise = true;
}
loweredField.bitfield = field->bitfield;
// We only need layout information for structures with non-void layout rule.
if (rule == SpirvLayoutRule::Void) {
return loweredField;
}
// Each structure-type member that is a matrix or array-of-matrices must be
// decorated with
// * A MatrixStride decoration, and
// * one of the RowMajor or ColMajor Decorations.
if (const auto *arrayType = astContext.getAsConstantArrayType(fieldType)) {
// We have an array of matrices as a field, we need to decorate
// MatrixStride on the field. So skip possible arrays here.
fieldType = arrayType->getElementType();
}
// Non-floating point matrices are represented as arrays of vectors, and
// therefore ColMajor and RowMajor decorations should not be applied to
// them.
QualType elemType = {};
if (isMxNMatrix(fieldType, &elemType) && elemType->isFloatingType()) {
uint32_t stride = 0;
alignmentCalc.getAlignmentAndSize(fieldType, rule,
/*isRowMajor*/ llvm::None, &stride);
loweredField.matrixStride = stride;
loweredField.isRowMajor = isRowMajorMatrix(spvOptions, fieldType);
}
return loweredField;
}
llvm::SmallVector<StructType::FieldInfo, 4>
LowerTypeVisitor::populateLayoutInformation(
llvm::ArrayRef<HybridStructType::FieldInfo> fields, SpirvLayoutRule rule) {
auto fieldVisitor = [this,
&rule](const StructType::FieldInfo *previousField,
const HybridStructType::FieldInfo *currentField,
const uint32_t nextFieldIndex) {
StructType::FieldInfo loweredField =
lowerField(currentField, rule, nextFieldIndex);
setDefaultFieldSize(alignmentCalc, rule, currentField, &loweredField);
    // Size information is only needed for fields of structures with a
    // non-void layout rule, and for bitfield fields (which may be merged
    // below).
if (rule == SpirvLayoutRule::Void && !currentField->bitfield.hasValue())
return loweredField;
// We only need layout information for structures with non-void layout rule.
if (rule != SpirvLayoutRule::Void) {
const uint32_t previousFieldEnd =
previousField ? previousField->offset.getValue() +
previousField->sizeInBytes.getValue()
: 0;
setDefaultFieldOffset(alignmentCalc, rule, previousFieldEnd, currentField,
&loweredField);
// The vk::offset attribute takes precedence over all.
if (currentField->vkOffsetAttr) {
loweredField.offset = currentField->vkOffsetAttr->getOffset();
return loweredField;
}
// The :packoffset() annotation takes precedence over normal layout
// calculation.
if (currentField->packOffsetAttr) {
const uint32_t offset =
currentField->packOffsetAttr->Subcomponent * 16 +
currentField->packOffsetAttr->ComponentOffset * 4;
// Do minimal check to make sure the offset specified by packoffset does
// not cause overlap.
if (offset < previousFieldEnd) {
emitError("packoffset caused overlap with previous members",
currentField->packOffsetAttr->Loc);
}
loweredField.offset = offset;
return loweredField;
}
// The :register(c#) annotation takes precedence over normal layout
// calculation.
if (currentField->registerC) {
const uint32_t offset = 16 * currentField->registerC->RegisterNumber;
// Do minimal check to make sure the offset specified by :register(c#)
// does not cause overlap.
if (offset < previousFieldEnd) {
emitError(
"found offset overlap when processing register(c%0) assignment",
currentField->registerC->Loc)
<< currentField->registerC->RegisterNumber;
}
loweredField.offset = offset;
return loweredField;
}
}
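    // Bitfield merging below. For example (illustrative):
    //   uint a : 3;
    //   uint b : 4;
    // 'b' is packed into the same 32-bit base as 'a', receiving
    // offsetInBits = 3 and sharing 'a''s offset and fieldIndex.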
if (!currentField->bitfield.hasValue())
return loweredField;
// Previous field is a full type, cannot merge.
if (!previousField || !previousField->bitfield.hasValue())
return loweredField;
    // Bitfields can only be merged if they have the exact same base type.
    // (SPIR-V cannot handle mixed-type bitfields.)
if (previousField->type != loweredField.type)
return loweredField;
const uint32_t basetypeSize = previousField->sizeInBytes.getValue() * 8;
const auto &previousBitfield = previousField->bitfield.getValue();
const uint32_t nextAvailableBit =
previousBitfield.offsetInBits + previousBitfield.sizeInBits;
if (nextAvailableBit + currentField->bitfield->sizeInBits > basetypeSize)
return loweredField;
loweredField.bitfield->offsetInBits = nextAvailableBit;
loweredField.offset = previousField->offset;
loweredField.fieldIndex = previousField->fieldIndex;
return loweredField;
};
// First, check to see if any of the structure members had 'register(c#)'
// location semantics. If so, members that do not have the 'register(c#)'
// assignment should be allocated after the *highest explicit address*.
// Example:
// float x : register(c10); // Offset = 160 (10 * 16)
// float y; // Offset = 164 (160 + 4)
// float z: register(c1); // Offset = 16 (1 * 16)
//
// This step is only here to simplify the struct layout generation.
std::vector<const HybridStructType::FieldInfo *> sortedFields =
sortFields(fields);
// The resulting vector of fields with proper layout information.
// Second, build each field, and determine their actual offset in the
// structure (explicit layout, bitfield merging, etc).
llvm::SmallVector<StructType::FieldInfo, 4> loweredFields;
llvm::DenseMap<const HybridStructType::FieldInfo *, uint32_t> fieldToIndexMap;
// This stores the index of the field in the actual SPIR-V construct.
// When bitfields are merged, this index will be the same for merged fields.
uint32_t fieldIndexInConstruct = 0;
for (size_t i = 0, iPrevious = -1; i < sortedFields.size(); iPrevious = i++) {
const size_t fieldIndexForMap = loweredFields.size();
loweredFields.emplace_back(fieldVisitor(
(iPrevious < loweredFields.size() ? &loweredFields[iPrevious]
: nullptr),
sortedFields[i], fieldIndexInConstruct));
if (!(iPrevious < loweredFields.size()) ||
loweredFields[iPrevious].fieldIndex !=
loweredFields.back().fieldIndex) {
fieldIndexInConstruct++;
}
fieldToIndexMap[sortedFields[i]] = fieldIndexForMap;
}
// Re-order the sorted fields back to their original order.
llvm::SmallVector<StructType::FieldInfo, 4> result;
for (const auto &field : fields)
result.push_back(loweredFields[fieldToIndexMap[&field]]);
return result;
}
} // namespace spirv
} // namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/NonUniformVisitor.cpp | //===--- NonUniformVisitor.cpp - NonUniform Visitor --------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "NonUniformVisitor.h"
namespace clang {
namespace spirv {
bool NonUniformVisitor::visit(SpirvLoad *instr) {
if (instr->getPointer()->isNonUniform())
instr->setNonUniform();
return true;
}
bool NonUniformVisitor::visit(SpirvAccessChain *instr) {
bool isNonUniform = instr->isNonUniform() || instr->getBase()->isNonUniform();
for (auto *index : instr->getIndexes())
isNonUniform = isNonUniform || index->isNonUniform();
instr->setNonUniform(isNonUniform);
return true;
}
bool NonUniformVisitor::visit(SpirvUnaryOp *instr) {
if (instr->getOperand()->isNonUniform())
instr->setNonUniform();
return true;
}
bool NonUniformVisitor::visit(SpirvBinaryOp *instr) {
if (instr->getOperand1()->isNonUniform() ||
instr->getOperand2()->isNonUniform())
instr->setNonUniform();
return true;
}
bool NonUniformVisitor::visit(SpirvSampledImage *instr) {
if (instr->getImage()->isNonUniform() || instr->getSampler()->isNonUniform())
instr->setNonUniform();
return true;
}
bool NonUniformVisitor::visit(SpirvImageTexelPointer *instr) {
if (instr->getImage()->isNonUniform())
instr->setNonUniform();
return true;
}
bool NonUniformVisitor::visit(SpirvAtomic *instr) {
if (instr->getPointer()->isNonUniform())
instr->setNonUniform();
return true;
}
} // end namespace spirv
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/PreciseVisitor.h | //===--- PreciseVisitor.h ---- Precise Visitor -------------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_SPIRV_PRECISEVISITOR_H
#define LLVM_CLANG_LIB_SPIRV_PRECISEVISITOR_H
#include "clang/SPIRV/SpirvVisitor.h"
namespace clang {
namespace spirv {
class PreciseVisitor : public Visitor {
public:
PreciseVisitor(SpirvContext &spvCtx, const SpirvCodeGenOptions &opts)
: Visitor(opts, spvCtx) {}
bool visit(SpirvFunction *, Phase) override;
bool visit(SpirvVariable *) override;
bool visit(SpirvReturn *) override;
bool visit(SpirvSelect *) override;
bool visit(SpirvVectorShuffle *) override;
bool visit(SpirvBitFieldExtract *) override;
bool visit(SpirvBitFieldInsert *) override;
bool visit(SpirvAtomic *) override;
bool visit(SpirvCompositeConstruct *) override;
bool visit(SpirvCompositeExtract *) override;
bool visit(SpirvCompositeInsert *) override;
bool visit(SpirvLoad *) override;
bool visit(SpirvStore *) override;
bool visit(SpirvBinaryOp *) override;
bool visit(SpirvUnaryOp *) override;
bool visit(SpirvGroupNonUniformOp *) override;
bool visit(SpirvExtInst *) override;
bool visit(SpirvFunctionCall *) override;
using Visitor::visit;
// TODO: Support propagation of 'precise' through OpSpecConstantOp and image
// operations if necessary. Related instruction classes are:
// SpirvSpecConstantBinaryOp, SpirvSpecConstantUnaryOp
// SpirvImageOp, SpirvImageQuery, SpirvImageTexelPointer, SpirvSampledImage
private:
  bool curFnRetValPrecise; ///< Whether the current function's return value is 'precise'
};
} // end namespace spirv
} // end namespace clang
#endif // LLVM_CLANG_LIB_SPIRV_PRECISEVISITOR_H
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/SPIRV/PreciseVisitor.cpp | //===--- PreciseVisitor.cpp ------- Precise Visitor --------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "PreciseVisitor.h"
#include "clang/SPIRV/AstTypeProbe.h"
#include "clang/SPIRV/SpirvFunction.h"
#include "clang/SPIRV/SpirvType.h"
#include <stack>
namespace {
/// \brief Returns true if the given OpAccessChain instruction is accessing a
/// precise variable, or accessing a precise member of a structure. Returns
/// false otherwise.
bool isAccessingPrecise(clang::spirv::SpirvAccessChain *inst) {
using namespace clang::spirv;
// If the access chain base is another access chain and so on, first flatten
// them (from the bottom to the top). For example:
// %x = OpAccessChain <type> %obj %int_1 %int_2
// %y = OpAccessChain <type> %x %int_3 %int_4
// %z = OpAccessChain <type> %y %int_5 %int_6
// Should be flattened to:
// %z = OpAccessChain <type> %obj %int_1 %int_2 %int_3 %int_4 %int_5 %int_6
std::stack<SpirvInstruction *> indexes;
SpirvInstruction *base = inst;
while (auto *accessChain = llvm::dyn_cast<SpirvAccessChain>(base)) {
for (auto iter = accessChain->getIndexes().rbegin();
iter != accessChain->getIndexes().rend(); ++iter) {
indexes.push(*iter);
}
base = accessChain->getBase();
// If we reach a 'precise' base at any level, return true.
if (base->isPrecise())
return true;
}
// Start from the lowest level base (%obj in the above example), and step
// forward using the 'indexes'. If a 'precise' structure field is discovered
// at any point, return true.
const SpirvType *baseType = base->getResultType();
while (baseType && !indexes.empty()) {
if (auto *vecType = llvm::dyn_cast<VectorType>(baseType)) {
indexes.pop();
baseType = vecType->getElementType();
} else if (auto *matType = llvm::dyn_cast<MatrixType>(baseType)) {
indexes.pop();
baseType = matType->getVecType();
} else if (auto *arrType = llvm::dyn_cast<ArrayType>(baseType)) {
indexes.pop();
baseType = arrType->getElementType();
} else if (auto *raType = llvm::dyn_cast<RuntimeArrayType>(baseType)) {
indexes.pop();
baseType = raType->getElementType();
} else if (auto *structType = llvm::dyn_cast<StructType>(baseType)) {
SpirvInstruction *index = indexes.top();
if (auto *constInt = llvm::dyn_cast<SpirvConstantInteger>(index)) {
uint32_t indexValue =
static_cast<uint32_t>(constInt->getValue().getZExtValue());
auto fields = structType->getFields();
assert(indexValue < fields.size());
auto &fieldInfo = fields[indexValue];
if (fieldInfo.isPrecise) {
return true;
} else {
baseType = fieldInfo.type;
indexes.pop();
}
} else {
// Trying to index into a structure using a variable? This shouldn't be
// happening.
assert(false && "indexing into a struct with variable value");
return false;
}
} else if (auto *ptrType = llvm::dyn_cast<SpirvPointerType>(baseType)) {
// Note: no need to pop the stack here.
baseType = ptrType->getPointeeType();
} else {
return false;
}
}
return false;
}
} // anonymous namespace
namespace clang {
namespace spirv {
bool PreciseVisitor::visit(SpirvFunction *fn, Phase phase) {
// Before going through the function instructions
if (phase == Visitor::Phase::Init) {
curFnRetValPrecise = fn->isPrecise();
}
return true;
}
bool PreciseVisitor::visit(SpirvReturn *inst) {
if (inst->hasReturnValue()) {
inst->getReturnValue()->setPrecise(curFnRetValPrecise);
}
return true;
}
bool PreciseVisitor::visit(SpirvVariable *var) {
if (var->hasInitializer())
var->getInitializer()->setPrecise(var->isPrecise());
return true;
}
bool PreciseVisitor::visit(SpirvSelect *inst) {
inst->getTrueObject()->setPrecise(inst->isPrecise());
inst->getFalseObject()->setPrecise(inst->isPrecise());
return true;
}
bool PreciseVisitor::visit(SpirvVectorShuffle *inst) {
// If the result of a vector shuffle is 'precise', the vectors from which the
// elements are chosen should also be 'precise'.
if (inst->isPrecise()) {
auto *vec1 = inst->getVec1();
auto *vec2 = inst->getVec2();
const auto vec1Type = vec1->getAstResultType();
const auto vec2Type = vec2->getAstResultType();
uint32_t vec1Size;
uint32_t vec2Size;
(void)isVectorType(vec1Type, nullptr, &vec1Size);
(void)isVectorType(vec2Type, nullptr, &vec2Size);
bool vec1ElemUsed = false;
bool vec2ElemUsed = false;
for (auto component : inst->getComponents()) {
if (component < vec1Size)
vec1ElemUsed = true;
else
vec2ElemUsed = true;
}
if (vec1ElemUsed)
vec1->setPrecise();
if (vec2ElemUsed)
vec2->setPrecise();
}
return true;
}
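// Example (illustrative): for
//   %z = OpVectorShuffle %v4float %a %b 0 1 4 5
// with both %a and %b of type v4float (vec1Size == vec2Size == 4),
// components {0, 1} select from %a and {4, 5} select from %b, so a precise
// %z marks both vectors precise; a shuffle using only indices below 4 would
// mark %a alone.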
bool PreciseVisitor::visit(SpirvBitFieldExtract *inst) {
inst->getBase()->setPrecise(inst->isPrecise());
return true;
}
bool PreciseVisitor::visit(SpirvBitFieldInsert *inst) {
inst->getBase()->setPrecise(inst->isPrecise());
inst->getInsert()->setPrecise(inst->isPrecise());
return true;
}
bool PreciseVisitor::visit(SpirvAtomic *inst) {
if (inst->isPrecise() && inst->hasValue())
inst->getValue()->setPrecise();
return true;
}
bool PreciseVisitor::visit(SpirvCompositeConstruct *inst) {
if (inst->isPrecise())
    for (auto *constituent : inst->getConstituents())
      constituent->setPrecise();

return true;
}
bool PreciseVisitor::visit(SpirvCompositeExtract *inst) {
inst->getComposite()->setPrecise(inst->isPrecise());
return true;
}
bool PreciseVisitor::visit(SpirvCompositeInsert *inst) {
inst->getComposite()->setPrecise(inst->isPrecise());
inst->getObject()->setPrecise(inst->isPrecise());
return true;
}
bool PreciseVisitor::visit(SpirvLoad *inst) {
// If the instruction result is precise, the pointer we're loading from should
// also be marked as precise.
if (inst->isPrecise())
inst->getPointer()->setPrecise();
return true;
}
bool PreciseVisitor::visit(SpirvStore *inst) {
// If the 'pointer' to which we are storing is marked as 'precise', the object
// we are storing should also be marked as 'precise'.
// Note that the 'pointer' may either be an 'OpVariable' or it might be the
// result of one or more access chains (in which case we should figure out if
// the 'base' of the access chain is 'precise').
auto *ptr = inst->getPointer();
auto *obj = inst->getObject();
// The simple case (target is a precise variable).
if (ptr->isPrecise()) {
obj->setPrecise();
return true;
}
if (auto *accessChain = llvm::dyn_cast<SpirvAccessChain>(ptr)) {
if (isAccessingPrecise(accessChain)) {
obj->setPrecise();
return true;
}
}
return true;
}
bool PreciseVisitor::visit(SpirvBinaryOp *inst) {
bool isPrecise = inst->isPrecise();
inst->getOperand1()->setPrecise(isPrecise);
inst->getOperand2()->setPrecise(isPrecise);
return true;
}
bool PreciseVisitor::visit(SpirvUnaryOp *inst) {
inst->getOperand()->setPrecise(inst->isPrecise());
return true;
}
bool PreciseVisitor::visit(SpirvGroupNonUniformOp *inst) {
for (auto *operand : inst->getOperands())
operand->setPrecise(inst->isPrecise());
return true;
}
bool PreciseVisitor::visit(SpirvExtInst *inst) {
if (inst->isPrecise())
for (auto *operand : inst->getOperands())
operand->setPrecise();
return true;
}
bool PreciseVisitor::visit(SpirvFunctionCall *call) {
// If a formal parameter for the function is precise, then the corresponding
// actual parameter should be marked as precise.
auto function = call->getFunction();
for (uint32_t i = 0; i < call->getArgs().size(); ++i) {
auto formalParameter = function->getParameters()[i];
if (!formalParameter->isPrecise()) {
continue;
}
call->getArgs()[i]->setPrecise();
}
return true;
}
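// Example (illustrative): if a callee is declared as
//   float f(precise float x);
// then for a call f(a + b) the instruction computing 'a + b' is marked
// precise here, allowing the flag to be propagated further into its
// operands by the other visit methods above.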
} // end namespace spirv
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/CommentBriefParser.cpp | //===--- CommentBriefParser.cpp - Dumb comment parser ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/CommentBriefParser.h"
#include "clang/AST/CommentCommandTraits.h"
#include "llvm/ADT/StringSwitch.h"
namespace clang {
namespace comments {
namespace {
inline bool isWhitespace(char C) {
return C == ' ' || C == '\n' || C == '\r' ||
C == '\t' || C == '\f' || C == '\v';
}
/// Convert all whitespace into spaces, remove leading and trailing spaces,
/// compress multiple spaces into one.
void cleanupBrief(std::string &S) {
bool PrevWasSpace = true;
std::string::iterator O = S.begin();
for (std::string::iterator I = S.begin(), E = S.end();
I != E; ++I) {
const char C = *I;
if (isWhitespace(C)) {
if (!PrevWasSpace) {
*O++ = ' ';
PrevWasSpace = true;
}
continue;
} else {
*O++ = C;
PrevWasSpace = false;
}
}
if (O != S.begin() && *(O - 1) == ' ')
--O;
S.resize(O - S.begin());
}
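// Example (illustrative): cleanupBrief turns "  Adds\t two\n numbers.  "
// into "Adds two numbers.": every whitespace run collapses to a single
// space and leading/trailing spaces are dropped.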
bool isWhitespace(StringRef Text) {
for (StringRef::const_iterator I = Text.begin(), E = Text.end();
I != E; ++I) {
if (!isWhitespace(*I))
return false;
}
return true;
}
} // unnamed namespace
BriefParser::BriefParser(Lexer &L, const CommandTraits &Traits) :
L(L), Traits(Traits) {
// Get lookahead token.
ConsumeToken();
}
std::string BriefParser::Parse() {
std::string FirstParagraphOrBrief;
std::string ReturnsParagraph;
bool InFirstParagraph = true;
bool InBrief = false;
bool InReturns = false;
while (Tok.isNot(tok::eof)) {
if (Tok.is(tok::text)) {
if (InFirstParagraph || InBrief)
FirstParagraphOrBrief += Tok.getText();
else if (InReturns)
ReturnsParagraph += Tok.getText();
ConsumeToken();
continue;
}
if (Tok.is(tok::backslash_command) || Tok.is(tok::at_command)) {
const CommandInfo *Info = Traits.getCommandInfo(Tok.getCommandID());
if (Info->IsBriefCommand) {
FirstParagraphOrBrief.clear();
InBrief = true;
ConsumeToken();
continue;
}
if (Info->IsReturnsCommand) {
InReturns = true;
InBrief = false;
InFirstParagraph = false;
ReturnsParagraph += "Returns ";
ConsumeToken();
continue;
}
// Block commands implicitly start a new paragraph.
if (Info->IsBlockCommand) {
// We found an implicit paragraph end.
InFirstParagraph = false;
if (InBrief)
break;
}
}
if (Tok.is(tok::newline)) {
if (InFirstParagraph || InBrief)
FirstParagraphOrBrief += ' ';
else if (InReturns)
ReturnsParagraph += ' ';
ConsumeToken();
      // If the next token is whitespace-only text, ignore it. Thus we allow
      // two paragraphs to be separated by a line that contains only whitespace.
//
// We don't need to add a space to the parsed text because we just added
// a space for the newline.
if (Tok.is(tok::text)) {
if (isWhitespace(Tok.getText()))
ConsumeToken();
}
if (Tok.is(tok::newline)) {
ConsumeToken();
        // We found a paragraph end. This ends the brief description if the
        // \\brief command or its equivalent was explicitly used.
        // Stop scanning text because an explicit \\brief paragraph is the
        // preferred one.
if (InBrief)
break;
// End first paragraph if we found some non-whitespace text.
if (InFirstParagraph && !isWhitespace(FirstParagraphOrBrief))
InFirstParagraph = false;
// End the \\returns paragraph because we found the paragraph end.
InReturns = false;
}
continue;
}
// We didn't handle this token, so just drop it.
ConsumeToken();
}
cleanupBrief(FirstParagraphOrBrief);
if (!FirstParagraphOrBrief.empty())
return FirstParagraphOrBrief;
cleanupBrief(ReturnsParagraph);
return ReturnsParagraph;
}
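// Example (illustrative): for a comment like
//   /// Frobnicates the widget.
//   ///
//   /// Long description follows...
// Parse() returns "Frobnicates the widget." since the blank line ends the
// first paragraph. If the comment instead contains only
//   /// \returns true on success.
// the result is "Returns true on success." via the ReturnsParagraph
// fallback.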
} // end namespace comments
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/SelectorLocationsKind.cpp | //===--- SelectorLocationsKind.cpp - Kind of selector locations -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Describes whether the identifier locations for a selector are "standard"
// or not.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/SelectorLocationsKind.h"
#include "clang/AST/Expr.h"
using namespace clang;
static SourceLocation getStandardSelLoc(unsigned Index,
Selector Sel,
bool WithArgSpace,
SourceLocation ArgLoc,
SourceLocation EndLoc) {
unsigned NumSelArgs = Sel.getNumArgs();
if (NumSelArgs == 0) {
assert(Index == 0);
if (EndLoc.isInvalid())
return SourceLocation();
IdentifierInfo *II = Sel.getIdentifierInfoForSlot(0);
unsigned Len = II ? II->getLength() : 0;
return EndLoc.getLocWithOffset(-Len);
}
assert(Index < NumSelArgs);
if (ArgLoc.isInvalid())
return SourceLocation();
IdentifierInfo *II = Sel.getIdentifierInfoForSlot(Index);
unsigned Len = /* selector id */ (II ? II->getLength() : 0) + /* ':' */ 1;
if (WithArgSpace)
++Len;
return ArgLoc.getLocWithOffset(-Len);
}
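// Example (illustrative): in a message send such as
//   [obj setWidth:w height:h]
// the standard location of the selector piece "height:" is the location of
// the argument 'h' offset back by strlen("height") + 1 (for the ':'), plus
// one more when the piece is written with a space before the argument.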
namespace {
template <typename T>
SourceLocation getArgLoc(T* Arg);
template <>
SourceLocation getArgLoc<Expr>(Expr *Arg) {
return Arg->getLocStart();
}
template <>
SourceLocation getArgLoc<ParmVarDecl>(ParmVarDecl *Arg) {
SourceLocation Loc = Arg->getLocStart();
if (Loc.isInvalid())
return Loc;
// -1 to point to left paren of the method parameter's type.
return Loc.getLocWithOffset(-1);
}
template <typename T>
SourceLocation getArgLoc(unsigned Index, ArrayRef<T*> Args) {
return Index < Args.size() ? getArgLoc(Args[Index]) : SourceLocation();
}
template <typename T>
SelectorLocationsKind hasStandardSelLocs(Selector Sel,
ArrayRef<SourceLocation> SelLocs,
ArrayRef<T *> Args,
SourceLocation EndLoc) {
  // Are selector locations in standard position with no space between args?
unsigned i;
for (i = 0; i != SelLocs.size(); ++i) {
if (SelLocs[i] != getStandardSelectorLoc(i, Sel, /*WithArgSpace=*/false,
Args, EndLoc))
break;
}
if (i == SelLocs.size())
return SelLoc_StandardNoSpace;
  // Are selector locations in standard position with space between args?
for (i = 0; i != SelLocs.size(); ++i) {
if (SelLocs[i] != getStandardSelectorLoc(i, Sel, /*WithArgSpace=*/true,
Args, EndLoc))
return SelLoc_NonStandard;
}
return SelLoc_StandardWithSpace;
}
} // anonymous namespace
SelectorLocationsKind
clang::hasStandardSelectorLocs(Selector Sel,
ArrayRef<SourceLocation> SelLocs,
ArrayRef<Expr *> Args,
SourceLocation EndLoc) {
return hasStandardSelLocs(Sel, SelLocs, Args, EndLoc);
}
SourceLocation clang::getStandardSelectorLoc(unsigned Index,
Selector Sel,
bool WithArgSpace,
ArrayRef<Expr *> Args,
SourceLocation EndLoc) {
return getStandardSelLoc(Index, Sel, WithArgSpace,
getArgLoc(Index, Args), EndLoc);
}
SelectorLocationsKind
clang::hasStandardSelectorLocs(Selector Sel,
ArrayRef<SourceLocation> SelLocs,
ArrayRef<ParmVarDecl *> Args,
SourceLocation EndLoc) {
return hasStandardSelLocs(Sel, SelLocs, Args, EndLoc);
}
SourceLocation clang::getStandardSelectorLoc(unsigned Index,
Selector Sel,
bool WithArgSpace,
ArrayRef<ParmVarDecl *> Args,
SourceLocation EndLoc) {
return getStandardSelLoc(Index, Sel, WithArgSpace,
getArgLoc(Index, Args), EndLoc);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/DeclGroup.cpp | //===--- DeclGroup.cpp - Classes for representing groups of Decls -*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the DeclGroup and DeclGroupRef classes.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/DeclGroup.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "llvm/Support/Allocator.h"
using namespace clang;
DeclGroup* DeclGroup::Create(ASTContext &C, Decl **Decls, unsigned NumDecls) {
static_assert(sizeof(DeclGroup) % llvm::AlignOf<void *>::Alignment == 0,
"Trailing data is unaligned!");
assert(NumDecls > 1 && "Invalid DeclGroup");
unsigned Size = sizeof(DeclGroup) + sizeof(Decl*) * NumDecls;
void* Mem = C.Allocate(Size, llvm::AlignOf<DeclGroup>::Alignment);
new (Mem) DeclGroup(NumDecls, Decls);
return static_cast<DeclGroup*>(Mem);
}
DeclGroup::DeclGroup(unsigned numdecls, Decl** decls) : NumDecls(numdecls) {
assert(numdecls > 0);
assert(decls);
memcpy(this+1, decls, numdecls * sizeof(*decls));
}
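// Layout note: the Decl* array is stored immediately after the DeclGroup
// object itself ('this + 1'), which is why Create() allocates
// sizeof(DeclGroup) + sizeof(Decl*) * NumDecls as a single block and the
// static_assert there checks that the trailing data stays pointer-aligned.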
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/DeclTemplate.cpp | //===--- DeclTemplate.cpp - Template Declaration AST Node Implementation --===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the C++ related Decl classes for templates.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/IdentifierTable.h"
#include "llvm/ADT/STLExtras.h"
#include <memory>
using namespace clang;
//===----------------------------------------------------------------------===//
// TemplateParameterList Implementation
//===----------------------------------------------------------------------===//
TemplateParameterList::TemplateParameterList(SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
NamedDecl **Params, unsigned NumParams,
SourceLocation RAngleLoc)
: TemplateLoc(TemplateLoc), LAngleLoc(LAngleLoc), RAngleLoc(RAngleLoc),
NumParams(NumParams), ContainsUnexpandedParameterPack(false) {
assert(this->NumParams == NumParams && "Too many template parameters");
for (unsigned Idx = 0; Idx < NumParams; ++Idx) {
NamedDecl *P = Params[Idx];
begin()[Idx] = P;
if (!P->isTemplateParameterPack()) {
if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(P))
if (NTTP->getType()->containsUnexpandedParameterPack())
ContainsUnexpandedParameterPack = true;
if (TemplateTemplateParmDecl *TTP = dyn_cast<TemplateTemplateParmDecl>(P))
if (TTP->getTemplateParameters()->containsUnexpandedParameterPack())
ContainsUnexpandedParameterPack = true;
// FIXME: If a default argument contains an unexpanded parameter pack, the
// template parameter list does too.
}
}
}
TemplateParameterList *
TemplateParameterList::Create(const ASTContext &C, SourceLocation TemplateLoc,
SourceLocation LAngleLoc, NamedDecl **Params,
unsigned NumParams, SourceLocation RAngleLoc) {
unsigned Size = sizeof(TemplateParameterList)
+ sizeof(NamedDecl *) * NumParams;
unsigned Align = std::max(llvm::alignOf<TemplateParameterList>(),
llvm::alignOf<NamedDecl*>());
void *Mem = C.Allocate(Size, Align);
return new (Mem) TemplateParameterList(TemplateLoc, LAngleLoc, Params,
NumParams, RAngleLoc);
}
unsigned TemplateParameterList::getMinRequiredArguments() const {
unsigned NumRequiredArgs = 0;
for (iterator P = const_cast<TemplateParameterList *>(this)->begin(),
PEnd = const_cast<TemplateParameterList *>(this)->end();
P != PEnd; ++P) {
if ((*P)->isTemplateParameterPack()) {
if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P))
if (NTTP->isExpandedParameterPack()) {
NumRequiredArgs += NTTP->getNumExpansionTypes();
continue;
}
break;
}
if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
if (TTP->hasDefaultArgument())
break;
} else if (NonTypeTemplateParmDecl *NTTP
= dyn_cast<NonTypeTemplateParmDecl>(*P)) {
if (NTTP->hasDefaultArgument())
break;
} else if (cast<TemplateTemplateParmDecl>(*P)->hasDefaultArgument())
break;
++NumRequiredArgs;
}
return NumRequiredArgs;
}
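// Example (illustrative): for
//   template <typename T, typename U = int> struct P;
// getMinRequiredArguments() is 1: counting stops at the first parameter
// that has a default argument, and an unexpanded parameter pack also stops
// the count since it can absorb any remaining arguments.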
unsigned TemplateParameterList::getDepth() const {
if (size() == 0)
return 0;
const NamedDecl *FirstParm = getParam(0);
if (const TemplateTypeParmDecl *TTP
= dyn_cast<TemplateTypeParmDecl>(FirstParm))
return TTP->getDepth();
else if (const NonTypeTemplateParmDecl *NTTP
= dyn_cast<NonTypeTemplateParmDecl>(FirstParm))
return NTTP->getDepth();
else
return cast<TemplateTemplateParmDecl>(FirstParm)->getDepth();
}
static void AdoptTemplateParameterList(TemplateParameterList *Params,
DeclContext *Owner) {
for (TemplateParameterList::iterator P = Params->begin(),
PEnd = Params->end();
P != PEnd; ++P) {
(*P)->setDeclContext(Owner);
if (TemplateTemplateParmDecl *TTP = dyn_cast<TemplateTemplateParmDecl>(*P))
AdoptTemplateParameterList(TTP->getTemplateParameters(), Owner);
}
}
namespace clang {
void *allocateDefaultArgStorageChain(const ASTContext &C) {
return new (C) char[sizeof(void*) * 2];
}
}
//===----------------------------------------------------------------------===//
// RedeclarableTemplateDecl Implementation
//===----------------------------------------------------------------------===//
RedeclarableTemplateDecl::CommonBase *RedeclarableTemplateDecl::getCommonPtr() const {
if (Common)
return Common;
// Walk the previous-declaration chain until we either find a declaration
// with a common pointer or we run out of previous declarations.
SmallVector<const RedeclarableTemplateDecl *, 2> PrevDecls;
for (const RedeclarableTemplateDecl *Prev = getPreviousDecl(); Prev;
Prev = Prev->getPreviousDecl()) {
if (Prev->Common) {
Common = Prev->Common;
break;
}
PrevDecls.push_back(Prev);
}
// If we never found a common pointer, allocate one now.
if (!Common) {
// FIXME: If any of the declarations is from an AST file, we probably
// need an update record to add the common data.
Common = newCommon(getASTContext());
}
// Update any previous declarations we saw with the common pointer.
for (unsigned I = 0, N = PrevDecls.size(); I != N; ++I)
PrevDecls[I]->Common = Common;
return Common;
}
template<class EntryType>
typename RedeclarableTemplateDecl::SpecEntryTraits<EntryType>::DeclType *
RedeclarableTemplateDecl::findSpecializationImpl(
llvm::FoldingSetVector<EntryType> &Specs, ArrayRef<TemplateArgument> Args,
void *&InsertPos) {
typedef SpecEntryTraits<EntryType> SETraits;
llvm::FoldingSetNodeID ID;
EntryType::Profile(ID,Args, getASTContext());
EntryType *Entry = Specs.FindNodeOrInsertPos(ID, InsertPos);
return Entry ? SETraits::getDecl(Entry)->getMostRecentDecl() : nullptr;
}
template<class Derived, class EntryType>
void RedeclarableTemplateDecl::addSpecializationImpl(
llvm::FoldingSetVector<EntryType> &Specializations, EntryType *Entry,
void *InsertPos) {
typedef SpecEntryTraits<EntryType> SETraits;
if (InsertPos) {
#ifndef NDEBUG
void *CorrectInsertPos;
assert(!findSpecializationImpl(Specializations,
SETraits::getTemplateArgs(Entry),
CorrectInsertPos) &&
InsertPos == CorrectInsertPos &&
"given incorrect InsertPos for specialization");
#endif
Specializations.InsertNode(Entry, InsertPos);
} else {
EntryType *Existing = Specializations.GetOrInsertNode(Entry);
(void)Existing;
assert(SETraits::getDecl(Existing)->isCanonicalDecl() &&
"non-canonical specialization?");
}
if (ASTMutationListener *L = getASTMutationListener())
L->AddedCXXTemplateSpecialization(cast<Derived>(this),
SETraits::getDecl(Entry));
}
/// \brief Generate the injected template arguments for the given template
/// parameter list, e.g., for the injected-class-name of a class template.
static void GenerateInjectedTemplateArgs(ASTContext &Context,
TemplateParameterList *Params,
TemplateArgument *Args) {
for (TemplateParameterList::iterator Param = Params->begin(),
ParamEnd = Params->end();
Param != ParamEnd; ++Param) {
TemplateArgument Arg;
if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*Param)) {
QualType ArgType = Context.getTypeDeclType(TTP);
if (TTP->isParameterPack())
ArgType = Context.getPackExpansionType(ArgType, None);
Arg = TemplateArgument(ArgType);
} else if (NonTypeTemplateParmDecl *NTTP =
dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
Expr *E = new (Context) DeclRefExpr(NTTP, /*enclosing*/ false,
NTTP->getType().getNonLValueExprType(Context),
Expr::getValueKindForType(NTTP->getType()),
NTTP->getLocation());
if (NTTP->isParameterPack())
E = new (Context) PackExpansionExpr(Context.DependentTy, E,
NTTP->getLocation(), None);
Arg = TemplateArgument(E);
} else {
TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*Param);
if (TTP->isParameterPack())
Arg = TemplateArgument(TemplateName(TTP), Optional<unsigned>());
else
Arg = TemplateArgument(TemplateName(TTP));
}
if ((*Param)->isTemplateParameterPack())
Arg = TemplateArgument::CreatePackCopy(Context, &Arg, 1);
*Args++ = Arg;
}
}
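// Example (illustrative): for
//   template <typename T, int N, typename... Ts> struct X;
// the injected arguments are the parameters themselves: the type T, a
// DeclRefExpr naming N, and the pack expansion Ts..., with each pack-typed
// parameter additionally wrapped in a one-element argument pack via
// CreatePackCopy.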
//===----------------------------------------------------------------------===//
// FunctionTemplateDecl Implementation
//===----------------------------------------------------------------------===//
void FunctionTemplateDecl::DeallocateCommon(void *Ptr) {
static_cast<Common *>(Ptr)->~Common();
}
FunctionTemplateDecl *FunctionTemplateDecl::Create(ASTContext &C,
DeclContext *DC,
SourceLocation L,
DeclarationName Name,
TemplateParameterList *Params,
NamedDecl *Decl) {
AdoptTemplateParameterList(Params, cast<DeclContext>(Decl));
return new (C, DC) FunctionTemplateDecl(C, DC, L, Name, Params, Decl);
}
FunctionTemplateDecl *FunctionTemplateDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
return new (C, ID) FunctionTemplateDecl(C, nullptr, SourceLocation(),
DeclarationName(), nullptr, nullptr);
}
RedeclarableTemplateDecl::CommonBase *
FunctionTemplateDecl::newCommon(ASTContext &C) const {
Common *CommonPtr = new (C) Common;
C.AddDeallocation(DeallocateCommon, CommonPtr);
return CommonPtr;
}
void FunctionTemplateDecl::LoadLazySpecializations() const {
// Grab the most recent declaration to ensure we've loaded any lazy
// redeclarations of this template.
//
// FIXME: Avoid walking the entire redeclaration chain here.
Common *CommonPtr = getMostRecentDecl()->getCommonPtr();
if (CommonPtr->LazySpecializations) {
ASTContext &Context = getASTContext();
uint32_t *Specs = CommonPtr->LazySpecializations;
CommonPtr->LazySpecializations = nullptr;
for (uint32_t I = 0, N = *Specs++; I != N; ++I)
(void)Context.getExternalSource()->GetExternalDecl(Specs[I]);
}
}
llvm::FoldingSetVector<FunctionTemplateSpecializationInfo> &
FunctionTemplateDecl::getSpecializations() const {
LoadLazySpecializations();
return getCommonPtr()->Specializations;
}
FunctionDecl *
FunctionTemplateDecl::findSpecialization(ArrayRef<TemplateArgument> Args,
void *&InsertPos) {
return findSpecializationImpl(getSpecializations(), Args, InsertPos);
}
void FunctionTemplateDecl::addSpecialization(
FunctionTemplateSpecializationInfo *Info, void *InsertPos) {
addSpecializationImpl<FunctionTemplateDecl>(getSpecializations(), Info,
InsertPos);
}
ArrayRef<TemplateArgument> FunctionTemplateDecl::getInjectedTemplateArgs() {
TemplateParameterList *Params = getTemplateParameters();
Common *CommonPtr = getCommonPtr();
if (!CommonPtr->InjectedArgs) {
CommonPtr->InjectedArgs
= new (getASTContext()) TemplateArgument[Params->size()];
GenerateInjectedTemplateArgs(getASTContext(), Params,
CommonPtr->InjectedArgs);
}
return llvm::makeArrayRef(CommonPtr->InjectedArgs, Params->size());
}
//===----------------------------------------------------------------------===//
// ClassTemplateDecl Implementation
//===----------------------------------------------------------------------===//
void ClassTemplateDecl::DeallocateCommon(void *Ptr) {
static_cast<Common *>(Ptr)->~Common();
}
ClassTemplateDecl *ClassTemplateDecl::Create(ASTContext &C,
DeclContext *DC,
SourceLocation L,
DeclarationName Name,
TemplateParameterList *Params,
NamedDecl *Decl,
ClassTemplateDecl *PrevDecl) {
AdoptTemplateParameterList(Params, cast<DeclContext>(Decl));
ClassTemplateDecl *New = new (C, DC) ClassTemplateDecl(C, DC, L, Name,
Params, Decl);
New->setPreviousDecl(PrevDecl);
return New;
}
ClassTemplateDecl *ClassTemplateDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
return new (C, ID) ClassTemplateDecl(C, nullptr, SourceLocation(),
DeclarationName(), nullptr, nullptr);
}
void ClassTemplateDecl::LoadLazySpecializations() const {
// Grab the most recent declaration to ensure we've loaded any lazy
// redeclarations of this template.
//
// FIXME: Avoid walking the entire redeclaration chain here.
Common *CommonPtr = getMostRecentDecl()->getCommonPtr();
if (CommonPtr->LazySpecializations) {
ASTContext &Context = getASTContext();
uint32_t *Specs = CommonPtr->LazySpecializations;
CommonPtr->LazySpecializations = nullptr;
for (uint32_t I = 0, N = *Specs++; I != N; ++I)
(void)Context.getExternalSource()->GetExternalDecl(Specs[I]);
}
}
llvm::FoldingSetVector<ClassTemplateSpecializationDecl> &
ClassTemplateDecl::getSpecializations() const {
LoadLazySpecializations();
return getCommonPtr()->Specializations;
}
llvm::FoldingSetVector<ClassTemplatePartialSpecializationDecl> &
ClassTemplateDecl::getPartialSpecializations() {
LoadLazySpecializations();
return getCommonPtr()->PartialSpecializations;
}
RedeclarableTemplateDecl::CommonBase *
ClassTemplateDecl::newCommon(ASTContext &C) const {
Common *CommonPtr = new (C) Common;
C.AddDeallocation(DeallocateCommon, CommonPtr);
return CommonPtr;
}
ClassTemplateSpecializationDecl *
ClassTemplateDecl::findSpecialization(ArrayRef<TemplateArgument> Args,
void *&InsertPos) {
return findSpecializationImpl(getSpecializations(), Args, InsertPos);
}
void ClassTemplateDecl::AddSpecialization(ClassTemplateSpecializationDecl *D,
void *InsertPos) {
addSpecializationImpl<ClassTemplateDecl>(getSpecializations(), D, InsertPos);
}
ClassTemplatePartialSpecializationDecl *
ClassTemplateDecl::findPartialSpecialization(ArrayRef<TemplateArgument> Args,
void *&InsertPos) {
return findSpecializationImpl(getPartialSpecializations(), Args, InsertPos);
}
void ClassTemplateDecl::AddPartialSpecialization(
ClassTemplatePartialSpecializationDecl *D,
void *InsertPos) {
if (InsertPos)
getPartialSpecializations().InsertNode(D, InsertPos);
else {
ClassTemplatePartialSpecializationDecl *Existing
= getPartialSpecializations().GetOrInsertNode(D);
(void)Existing;
assert(Existing->isCanonicalDecl() && "Non-canonical specialization?");
}
if (ASTMutationListener *L = getASTMutationListener())
L->AddedCXXTemplateSpecialization(this, D);
}
void ClassTemplateDecl::getPartialSpecializations(
SmallVectorImpl<ClassTemplatePartialSpecializationDecl *> &PS) {
llvm::FoldingSetVector<ClassTemplatePartialSpecializationDecl> &PartialSpecs
= getPartialSpecializations();
PS.clear();
PS.reserve(PartialSpecs.size());
for (llvm::FoldingSetVector<ClassTemplatePartialSpecializationDecl>::iterator
P = PartialSpecs.begin(), PEnd = PartialSpecs.end();
P != PEnd; ++P)
PS.push_back(P->getMostRecentDecl());
}
ClassTemplatePartialSpecializationDecl *
ClassTemplateDecl::findPartialSpecialization(QualType T) {
ASTContext &Context = getASTContext();
using llvm::FoldingSetVector;
typedef FoldingSetVector<ClassTemplatePartialSpecializationDecl>::iterator
partial_spec_iterator;
for (partial_spec_iterator P = getPartialSpecializations().begin(),
PEnd = getPartialSpecializations().end();
P != PEnd; ++P) {
if (Context.hasSameType(P->getInjectedSpecializationType(), T))
return P->getMostRecentDecl();
}
return nullptr;
}
ClassTemplatePartialSpecializationDecl *
ClassTemplateDecl::findPartialSpecInstantiatedFromMember(
ClassTemplatePartialSpecializationDecl *D) {
Decl *DCanon = D->getCanonicalDecl();
for (llvm::FoldingSetVector<ClassTemplatePartialSpecializationDecl>::iterator
P = getPartialSpecializations().begin(),
PEnd = getPartialSpecializations().end();
P != PEnd; ++P) {
if (P->getInstantiatedFromMember()->getCanonicalDecl() == DCanon)
return P->getMostRecentDecl();
}
return nullptr;
}
QualType
ClassTemplateDecl::getInjectedClassNameSpecialization() {
Common *CommonPtr = getCommonPtr();
if (!CommonPtr->InjectedClassNameType.isNull())
return CommonPtr->InjectedClassNameType;
// C++0x [temp.dep.type]p2:
// The template argument list of a primary template is a template argument
// list in which the nth template argument has the value of the nth template
// parameter of the class template. If the nth template parameter is a
// template parameter pack (14.5.3), the nth template argument is a pack
// expansion (14.5.3) whose pattern is the name of the template parameter
// pack.
ASTContext &Context = getASTContext();
TemplateParameterList *Params = getTemplateParameters();
SmallVector<TemplateArgument, 16> TemplateArgs;
TemplateArgs.resize(Params->size());
GenerateInjectedTemplateArgs(getASTContext(), Params, TemplateArgs.data());
CommonPtr->InjectedClassNameType
= Context.getTemplateSpecializationType(TemplateName(this),
&TemplateArgs[0],
TemplateArgs.size());
return CommonPtr->InjectedClassNameType;
}
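// Example (illustrative): within
//   template <typename T, typename... Ts> class vector { /* ... */ };
// the injected-class-name 'vector' denotes vector<T, Ts...>, which is the
// TemplateSpecializationType computed and cached above.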
//===----------------------------------------------------------------------===//
// TemplateTypeParm Allocation/Deallocation Method Implementations
//===----------------------------------------------------------------------===//
TemplateTypeParmDecl *
TemplateTypeParmDecl::Create(const ASTContext &C, DeclContext *DC,
SourceLocation KeyLoc, SourceLocation NameLoc,
unsigned D, unsigned P, IdentifierInfo *Id,
bool Typename, bool ParameterPack) {
TemplateTypeParmDecl *TTPDecl =
new (C, DC) TemplateTypeParmDecl(DC, KeyLoc, NameLoc, Id, Typename);
QualType TTPType = C.getTemplateTypeParmType(D, P, ParameterPack, TTPDecl);
TTPDecl->setTypeForDecl(TTPType.getTypePtr());
return TTPDecl;
}
TemplateTypeParmDecl *
TemplateTypeParmDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
return new (C, ID) TemplateTypeParmDecl(nullptr, SourceLocation(),
SourceLocation(), nullptr, false);
}
SourceLocation TemplateTypeParmDecl::getDefaultArgumentLoc() const {
return hasDefaultArgument()
? getDefaultArgumentInfo()->getTypeLoc().getBeginLoc()
: SourceLocation();
}
SourceRange TemplateTypeParmDecl::getSourceRange() const {
if (hasDefaultArgument() && !defaultArgumentWasInherited())
return SourceRange(getLocStart(),
getDefaultArgumentInfo()->getTypeLoc().getEndLoc());
else
return TypeDecl::getSourceRange();
}
unsigned TemplateTypeParmDecl::getDepth() const {
return getTypeForDecl()->getAs<TemplateTypeParmType>()->getDepth();
}
unsigned TemplateTypeParmDecl::getIndex() const {
return getTypeForDecl()->getAs<TemplateTypeParmType>()->getIndex();
}
bool TemplateTypeParmDecl::isParameterPack() const {
return getTypeForDecl()->getAs<TemplateTypeParmType>()->isParameterPack();
}
//===----------------------------------------------------------------------===//
// NonTypeTemplateParmDecl Method Implementations
//===----------------------------------------------------------------------===//
NonTypeTemplateParmDecl::NonTypeTemplateParmDecl(DeclContext *DC,
SourceLocation StartLoc,
SourceLocation IdLoc,
unsigned D, unsigned P,
IdentifierInfo *Id,
QualType T,
TypeSourceInfo *TInfo,
const QualType *ExpandedTypes,
unsigned NumExpandedTypes,
TypeSourceInfo **ExpandedTInfos)
: DeclaratorDecl(NonTypeTemplateParm, DC, IdLoc, Id, T, TInfo, StartLoc),
TemplateParmPosition(D, P), ParameterPack(true),
ExpandedParameterPack(true), NumExpandedTypes(NumExpandedTypes) {
if (ExpandedTypes && ExpandedTInfos) {
void **TypesAndInfos = reinterpret_cast<void **>(this + 1);
for (unsigned I = 0; I != NumExpandedTypes; ++I) {
TypesAndInfos[2*I] = ExpandedTypes[I].getAsOpaquePtr();
TypesAndInfos[2*I + 1] = ExpandedTInfos[I];
}
}
}
NonTypeTemplateParmDecl *
NonTypeTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
unsigned D, unsigned P, IdentifierInfo *Id,
QualType T, bool ParameterPack,
TypeSourceInfo *TInfo) {
return new (C, DC) NonTypeTemplateParmDecl(DC, StartLoc, IdLoc, D, P, Id,
T, ParameterPack, TInfo);
}
NonTypeTemplateParmDecl *
NonTypeTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
unsigned D, unsigned P,
IdentifierInfo *Id, QualType T,
TypeSourceInfo *TInfo,
const QualType *ExpandedTypes,
unsigned NumExpandedTypes,
TypeSourceInfo **ExpandedTInfos) {
unsigned Extra = NumExpandedTypes * 2 * sizeof(void*);
return new (C, DC, Extra) NonTypeTemplateParmDecl(
DC, StartLoc, IdLoc, D, P, Id, T, TInfo,
ExpandedTypes, NumExpandedTypes, ExpandedTInfos);
}
NonTypeTemplateParmDecl *
NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) NonTypeTemplateParmDecl(nullptr, SourceLocation(),
SourceLocation(), 0, 0, nullptr,
QualType(), false, nullptr);
}
NonTypeTemplateParmDecl *
NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID,
unsigned NumExpandedTypes) {
unsigned Extra = NumExpandedTypes * 2 * sizeof(void*);
return new (C, ID, Extra) NonTypeTemplateParmDecl(
nullptr, SourceLocation(), SourceLocation(), 0, 0, nullptr, QualType(),
nullptr, nullptr, NumExpandedTypes, nullptr);
}
SourceRange NonTypeTemplateParmDecl::getSourceRange() const {
if (hasDefaultArgument() && !defaultArgumentWasInherited())
return SourceRange(getOuterLocStart(),
getDefaultArgument()->getSourceRange().getEnd());
return DeclaratorDecl::getSourceRange();
}
SourceLocation NonTypeTemplateParmDecl::getDefaultArgumentLoc() const {
return hasDefaultArgument()
? getDefaultArgument()->getSourceRange().getBegin()
: SourceLocation();
}
//===----------------------------------------------------------------------===//
// TemplateTemplateParmDecl Method Implementations
//===----------------------------------------------------------------------===//
void TemplateTemplateParmDecl::anchor() { }
TemplateTemplateParmDecl::TemplateTemplateParmDecl(
DeclContext *DC, SourceLocation L, unsigned D, unsigned P,
IdentifierInfo *Id, TemplateParameterList *Params,
unsigned NumExpansions, TemplateParameterList * const *Expansions)
: TemplateDecl(TemplateTemplateParm, DC, L, Id, Params),
TemplateParmPosition(D, P), ParameterPack(true),
ExpandedParameterPack(true), NumExpandedParams(NumExpansions) {
if (Expansions)
std::memcpy(reinterpret_cast<void*>(this + 1), Expansions,
sizeof(TemplateParameterList*) * NumExpandedParams);
}
TemplateTemplateParmDecl *
TemplateTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
SourceLocation L, unsigned D, unsigned P,
bool ParameterPack, IdentifierInfo *Id,
TemplateParameterList *Params) {
return new (C, DC) TemplateTemplateParmDecl(DC, L, D, P, ParameterPack, Id,
Params);
}
TemplateTemplateParmDecl *
TemplateTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
SourceLocation L, unsigned D, unsigned P,
IdentifierInfo *Id,
TemplateParameterList *Params,
ArrayRef<TemplateParameterList *> Expansions) {
return new (C, DC, sizeof(TemplateParameterList*) * Expansions.size())
TemplateTemplateParmDecl(DC, L, D, P, Id, Params,
Expansions.size(), Expansions.data());
}
TemplateTemplateParmDecl *
TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) TemplateTemplateParmDecl(nullptr, SourceLocation(), 0, 0,
false, nullptr, nullptr);
}
TemplateTemplateParmDecl *
TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID,
unsigned NumExpansions) {
return new (C, ID, sizeof(TemplateParameterList*) * NumExpansions)
TemplateTemplateParmDecl(nullptr, SourceLocation(), 0, 0, nullptr,
nullptr, NumExpansions, nullptr);
}
SourceLocation TemplateTemplateParmDecl::getDefaultArgumentLoc() const {
return hasDefaultArgument() ? getDefaultArgument().getLocation()
: SourceLocation();
}
void TemplateTemplateParmDecl::setDefaultArgument(
const ASTContext &C, const TemplateArgumentLoc &DefArg) {
if (DefArg.getArgument().isNull())
DefaultArgument.set(nullptr);
else
DefaultArgument.set(new (C) TemplateArgumentLoc(DefArg));
}
//===----------------------------------------------------------------------===//
// TemplateArgumentList Implementation
//===----------------------------------------------------------------------===//
TemplateArgumentList *
TemplateArgumentList::CreateCopy(ASTContext &Context,
const TemplateArgument *Args,
unsigned NumArgs) {
std::size_t Size = sizeof(TemplateArgumentList)
+ NumArgs * sizeof(TemplateArgument);
void *Mem = Context.Allocate(Size);
TemplateArgument *StoredArgs
= reinterpret_cast<TemplateArgument *>(
static_cast<TemplateArgumentList *>(Mem) + 1);
std::uninitialized_copy(Args, Args + NumArgs, StoredArgs);
return new (Mem) TemplateArgumentList(StoredArgs, NumArgs, true);
}
FunctionTemplateSpecializationInfo *
FunctionTemplateSpecializationInfo::Create(ASTContext &C, FunctionDecl *FD,
FunctionTemplateDecl *Template,
TemplateSpecializationKind TSK,
const TemplateArgumentList *TemplateArgs,
const TemplateArgumentListInfo *TemplateArgsAsWritten,
SourceLocation POI) {
const ASTTemplateArgumentListInfo *ArgsAsWritten = nullptr;
if (TemplateArgsAsWritten)
ArgsAsWritten = ASTTemplateArgumentListInfo::Create(C,
*TemplateArgsAsWritten);
return new (C) FunctionTemplateSpecializationInfo(FD, Template, TSK,
TemplateArgs,
ArgsAsWritten,
POI);
}
//===----------------------------------------------------------------------===//
// TemplateDecl Implementation
//===----------------------------------------------------------------------===//
void TemplateDecl::anchor() { }
//===----------------------------------------------------------------------===//
// ClassTemplateSpecializationDecl Implementation
//===----------------------------------------------------------------------===//
ClassTemplateSpecializationDecl::
ClassTemplateSpecializationDecl(ASTContext &Context, Kind DK, TagKind TK,
DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc,
ClassTemplateDecl *SpecializedTemplate,
const TemplateArgument *Args,
unsigned NumArgs,
ClassTemplateSpecializationDecl *PrevDecl)
: CXXRecordDecl(DK, TK, Context, DC, StartLoc, IdLoc,
SpecializedTemplate->getIdentifier(),
PrevDecl),
SpecializedTemplate(SpecializedTemplate),
ExplicitInfo(nullptr),
TemplateArgs(TemplateArgumentList::CreateCopy(Context, Args, NumArgs)),
SpecializationKind(TSK_Undeclared) {
}
ClassTemplateSpecializationDecl::ClassTemplateSpecializationDecl(ASTContext &C,
Kind DK)
: CXXRecordDecl(DK, TTK_Struct, C, nullptr, SourceLocation(),
SourceLocation(), nullptr, nullptr),
ExplicitInfo(nullptr), SpecializationKind(TSK_Undeclared) {}
ClassTemplateSpecializationDecl *
ClassTemplateSpecializationDecl::Create(ASTContext &Context, TagKind TK,
DeclContext *DC,
SourceLocation StartLoc,
SourceLocation IdLoc,
ClassTemplateDecl *SpecializedTemplate,
const TemplateArgument *Args,
unsigned NumArgs,
ClassTemplateSpecializationDecl *PrevDecl) {
ClassTemplateSpecializationDecl *Result =
new (Context, DC) ClassTemplateSpecializationDecl(
Context, ClassTemplateSpecialization, TK, DC, StartLoc, IdLoc,
SpecializedTemplate, Args, NumArgs, PrevDecl);
Result->MayHaveOutOfDateDef = false;
Context.getTypeDeclType(Result, PrevDecl);
return Result;
}
ClassTemplateSpecializationDecl *
ClassTemplateSpecializationDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
ClassTemplateSpecializationDecl *Result =
new (C, ID) ClassTemplateSpecializationDecl(C, ClassTemplateSpecialization);
Result->MayHaveOutOfDateDef = false;
return Result;
}
void ClassTemplateSpecializationDecl::getNameForDiagnostic(
raw_ostream &OS, const PrintingPolicy &Policy, bool Qualified) const {
NamedDecl::getNameForDiagnostic(OS, Policy, Qualified);
const TemplateArgumentList &TemplateArgs = getTemplateArgs();
TemplateSpecializationType::PrintTemplateArgumentList(
OS, TemplateArgs.data(), TemplateArgs.size(), Policy);
}
ClassTemplateDecl *
ClassTemplateSpecializationDecl::getSpecializedTemplate() const {
if (SpecializedPartialSpecialization *PartialSpec
= SpecializedTemplate.dyn_cast<SpecializedPartialSpecialization*>())
return PartialSpec->PartialSpecialization->getSpecializedTemplate();
return SpecializedTemplate.get<ClassTemplateDecl*>();
}
SourceRange
ClassTemplateSpecializationDecl::getSourceRange() const {
if (ExplicitInfo) {
SourceLocation Begin = getTemplateKeywordLoc();
if (Begin.isValid()) {
// Here we have an explicit (partial) specialization or instantiation.
assert(getSpecializationKind() == TSK_ExplicitSpecialization ||
getSpecializationKind() == TSK_ExplicitInstantiationDeclaration ||
getSpecializationKind() == TSK_ExplicitInstantiationDefinition);
if (getExternLoc().isValid())
Begin = getExternLoc();
SourceLocation End = getRBraceLoc();
if (End.isInvalid())
End = getTypeAsWritten()->getTypeLoc().getEndLoc();
return SourceRange(Begin, End);
}
// An implicit instantiation of a class template partial specialization
// uses ExplicitInfo to record the TypeAsWritten, but the source
// locations should be retrieved from the instantiation pattern.
typedef ClassTemplatePartialSpecializationDecl CTPSDecl;
CTPSDecl *ctpsd = const_cast<CTPSDecl*>(cast<CTPSDecl>(this));
CTPSDecl *inst_from = ctpsd->getInstantiatedFromMember();
assert(inst_from != nullptr);
return inst_from->getSourceRange();
}
else {
// No explicit info available.
llvm::PointerUnion<ClassTemplateDecl *,
ClassTemplatePartialSpecializationDecl *>
inst_from = getInstantiatedFrom();
if (inst_from.isNull())
return getSpecializedTemplate()->getSourceRange();
if (ClassTemplateDecl *ctd = inst_from.dyn_cast<ClassTemplateDecl*>())
return ctd->getSourceRange();
return inst_from.get<ClassTemplatePartialSpecializationDecl*>()
->getSourceRange();
}
}
//===----------------------------------------------------------------------===//
// ClassTemplatePartialSpecializationDecl Implementation
//===----------------------------------------------------------------------===//
void ClassTemplatePartialSpecializationDecl::anchor() { }
ClassTemplatePartialSpecializationDecl::
ClassTemplatePartialSpecializationDecl(ASTContext &Context, TagKind TK,
DeclContext *DC,
SourceLocation StartLoc,
SourceLocation IdLoc,
TemplateParameterList *Params,
ClassTemplateDecl *SpecializedTemplate,
const TemplateArgument *Args,
unsigned NumArgs,
const ASTTemplateArgumentListInfo *ArgInfos,
ClassTemplatePartialSpecializationDecl *PrevDecl)
: ClassTemplateSpecializationDecl(Context,
ClassTemplatePartialSpecialization,
TK, DC, StartLoc, IdLoc,
SpecializedTemplate,
Args, NumArgs, PrevDecl),
TemplateParams(Params), ArgsAsWritten(ArgInfos),
InstantiatedFromMember(nullptr, false)
{
AdoptTemplateParameterList(Params, this);
}
ClassTemplatePartialSpecializationDecl *
ClassTemplatePartialSpecializationDecl::
Create(ASTContext &Context, TagKind TK,DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
TemplateParameterList *Params,
ClassTemplateDecl *SpecializedTemplate,
const TemplateArgument *Args,
unsigned NumArgs,
const TemplateArgumentListInfo &ArgInfos,
QualType CanonInjectedType,
ClassTemplatePartialSpecializationDecl *PrevDecl) {
const ASTTemplateArgumentListInfo *ASTArgInfos =
ASTTemplateArgumentListInfo::Create(Context, ArgInfos);
ClassTemplatePartialSpecializationDecl *Result = new (Context, DC)
ClassTemplatePartialSpecializationDecl(Context, TK, DC, StartLoc, IdLoc,
Params, SpecializedTemplate, Args,
NumArgs, ASTArgInfos, PrevDecl);
Result->setSpecializationKind(TSK_ExplicitSpecialization);
Result->MayHaveOutOfDateDef = false;
Context.getInjectedClassNameType(Result, CanonInjectedType);
return Result;
}
ClassTemplatePartialSpecializationDecl *
ClassTemplatePartialSpecializationDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
ClassTemplatePartialSpecializationDecl *Result =
new (C, ID) ClassTemplatePartialSpecializationDecl(C);
Result->MayHaveOutOfDateDef = false;
return Result;
}
//===----------------------------------------------------------------------===//
// FriendTemplateDecl Implementation
//===----------------------------------------------------------------------===//
void FriendTemplateDecl::anchor() { }
FriendTemplateDecl *FriendTemplateDecl::Create(ASTContext &Context,
DeclContext *DC,
SourceLocation L,
unsigned NParams,
TemplateParameterList **Params,
FriendUnion Friend,
SourceLocation FLoc) {
return new (Context, DC) FriendTemplateDecl(DC, L, NParams, Params,
Friend, FLoc);
}
FriendTemplateDecl *FriendTemplateDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
return new (C, ID) FriendTemplateDecl(EmptyShell());
}
//===----------------------------------------------------------------------===//
// TypeAliasTemplateDecl Implementation
//===----------------------------------------------------------------------===//
TypeAliasTemplateDecl *TypeAliasTemplateDecl::Create(ASTContext &C,
DeclContext *DC,
SourceLocation L,
DeclarationName Name,
TemplateParameterList *Params,
NamedDecl *Decl) {
AdoptTemplateParameterList(Params, DC);
return new (C, DC) TypeAliasTemplateDecl(C, DC, L, Name, Params, Decl);
}
TypeAliasTemplateDecl *TypeAliasTemplateDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
return new (C, ID) TypeAliasTemplateDecl(C, nullptr, SourceLocation(),
DeclarationName(), nullptr, nullptr);
}
void TypeAliasTemplateDecl::DeallocateCommon(void *Ptr) {
static_cast<Common *>(Ptr)->~Common();
}
RedeclarableTemplateDecl::CommonBase *
TypeAliasTemplateDecl::newCommon(ASTContext &C) const {
Common *CommonPtr = new (C) Common;
C.AddDeallocation(DeallocateCommon, CommonPtr);
return CommonPtr;
}
//===----------------------------------------------------------------------===//
// ClassScopeFunctionSpecializationDecl Implementation
//===----------------------------------------------------------------------===//
void ClassScopeFunctionSpecializationDecl::anchor() { }
ClassScopeFunctionSpecializationDecl *
ClassScopeFunctionSpecializationDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
return new (C, ID) ClassScopeFunctionSpecializationDecl(
nullptr, SourceLocation(), nullptr, false, TemplateArgumentListInfo());
}
//===----------------------------------------------------------------------===//
// VarTemplateDecl Implementation
//===----------------------------------------------------------------------===//
void VarTemplateDecl::DeallocateCommon(void *Ptr) {
static_cast<Common *>(Ptr)->~Common();
}
VarTemplateDecl *VarTemplateDecl::getDefinition() {
VarTemplateDecl *CurD = this;
while (CurD) {
if (CurD->isThisDeclarationADefinition())
return CurD;
CurD = CurD->getPreviousDecl();
}
return nullptr;
}
VarTemplateDecl *VarTemplateDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation L, DeclarationName Name,
TemplateParameterList *Params,
VarDecl *Decl) {
return new (C, DC) VarTemplateDecl(C, DC, L, Name, Params, Decl);
}
VarTemplateDecl *VarTemplateDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
return new (C, ID) VarTemplateDecl(C, nullptr, SourceLocation(),
DeclarationName(), nullptr, nullptr);
}
// TODO: Unify across class, function and variable templates?
// May require moving this and Common to RedeclarableTemplateDecl.
void VarTemplateDecl::LoadLazySpecializations() const {
// Grab the most recent declaration to ensure we've loaded any lazy
// redeclarations of this template.
//
// FIXME: Avoid walking the entire redeclaration chain here.
Common *CommonPtr = getMostRecentDecl()->getCommonPtr();
if (CommonPtr->LazySpecializations) {
ASTContext &Context = getASTContext();
uint32_t *Specs = CommonPtr->LazySpecializations;
CommonPtr->LazySpecializations = nullptr;
for (uint32_t I = 0, N = *Specs++; I != N; ++I)
(void)Context.getExternalSource()->GetExternalDecl(Specs[I]);
}
}
llvm::FoldingSetVector<VarTemplateSpecializationDecl> &
VarTemplateDecl::getSpecializations() const {
LoadLazySpecializations();
return getCommonPtr()->Specializations;
}
llvm::FoldingSetVector<VarTemplatePartialSpecializationDecl> &
VarTemplateDecl::getPartialSpecializations() {
LoadLazySpecializations();
return getCommonPtr()->PartialSpecializations;
}
RedeclarableTemplateDecl::CommonBase *
VarTemplateDecl::newCommon(ASTContext &C) const {
Common *CommonPtr = new (C) Common;
C.AddDeallocation(DeallocateCommon, CommonPtr);
return CommonPtr;
}
VarTemplateSpecializationDecl *
VarTemplateDecl::findSpecialization(ArrayRef<TemplateArgument> Args,
void *&InsertPos) {
return findSpecializationImpl(getSpecializations(), Args, InsertPos);
}
void VarTemplateDecl::AddSpecialization(VarTemplateSpecializationDecl *D,
void *InsertPos) {
addSpecializationImpl<VarTemplateDecl>(getSpecializations(), D, InsertPos);
}
VarTemplatePartialSpecializationDecl *
VarTemplateDecl::findPartialSpecialization(ArrayRef<TemplateArgument> Args,
void *&InsertPos) {
return findSpecializationImpl(getPartialSpecializations(), Args, InsertPos);
}
void VarTemplateDecl::AddPartialSpecialization(
VarTemplatePartialSpecializationDecl *D, void *InsertPos) {
if (InsertPos)
getPartialSpecializations().InsertNode(D, InsertPos);
else {
VarTemplatePartialSpecializationDecl *Existing =
getPartialSpecializations().GetOrInsertNode(D);
(void)Existing;
assert(Existing->isCanonicalDecl() && "Non-canonical specialization?");
}
if (ASTMutationListener *L = getASTMutationListener())
L->AddedCXXTemplateSpecialization(this, D);
}
void VarTemplateDecl::getPartialSpecializations(
SmallVectorImpl<VarTemplatePartialSpecializationDecl *> &PS) {
llvm::FoldingSetVector<VarTemplatePartialSpecializationDecl> &PartialSpecs =
getPartialSpecializations();
PS.clear();
PS.reserve(PartialSpecs.size());
for (llvm::FoldingSetVector<VarTemplatePartialSpecializationDecl>::iterator
P = PartialSpecs.begin(),
PEnd = PartialSpecs.end();
P != PEnd; ++P)
PS.push_back(P->getMostRecentDecl());
}
VarTemplatePartialSpecializationDecl *
VarTemplateDecl::findPartialSpecInstantiatedFromMember(
VarTemplatePartialSpecializationDecl *D) {
Decl *DCanon = D->getCanonicalDecl();
for (llvm::FoldingSetVector<VarTemplatePartialSpecializationDecl>::iterator
P = getPartialSpecializations().begin(),
PEnd = getPartialSpecializations().end();
P != PEnd; ++P) {
if (P->getInstantiatedFromMember()->getCanonicalDecl() == DCanon)
return P->getMostRecentDecl();
}
return nullptr;
}
//===----------------------------------------------------------------------===//
// VarTemplateSpecializationDecl Implementation
//===----------------------------------------------------------------------===//
VarTemplateSpecializationDecl::VarTemplateSpecializationDecl(
Kind DK, ASTContext &Context, DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc, VarTemplateDecl *SpecializedTemplate, QualType T,
TypeSourceInfo *TInfo, StorageClass S, const TemplateArgument *Args,
unsigned NumArgs)
: VarDecl(DK, Context, DC, StartLoc, IdLoc,
SpecializedTemplate->getIdentifier(), T, TInfo, S),
SpecializedTemplate(SpecializedTemplate), ExplicitInfo(nullptr),
TemplateArgs(TemplateArgumentList::CreateCopy(Context, Args, NumArgs)),
SpecializationKind(TSK_Undeclared) {}
VarTemplateSpecializationDecl::VarTemplateSpecializationDecl(Kind DK,
ASTContext &C)
: VarDecl(DK, C, nullptr, SourceLocation(), SourceLocation(), nullptr,
QualType(), nullptr, SC_None),
ExplicitInfo(nullptr), SpecializationKind(TSK_Undeclared) {}
VarTemplateSpecializationDecl *VarTemplateSpecializationDecl::Create(
ASTContext &Context, DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc, VarTemplateDecl *SpecializedTemplate, QualType T,
TypeSourceInfo *TInfo, StorageClass S, const TemplateArgument *Args,
unsigned NumArgs) {
return new (Context, DC) VarTemplateSpecializationDecl(
VarTemplateSpecialization, Context, DC, StartLoc, IdLoc,
SpecializedTemplate, T, TInfo, S, Args, NumArgs);
}
VarTemplateSpecializationDecl *
VarTemplateSpecializationDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID)
VarTemplateSpecializationDecl(VarTemplateSpecialization, C);
}
void VarTemplateSpecializationDecl::getNameForDiagnostic(
raw_ostream &OS, const PrintingPolicy &Policy, bool Qualified) const {
NamedDecl::getNameForDiagnostic(OS, Policy, Qualified);
const TemplateArgumentList &TemplateArgs = getTemplateArgs();
TemplateSpecializationType::PrintTemplateArgumentList(
OS, TemplateArgs.data(), TemplateArgs.size(), Policy);
}
VarTemplateDecl *VarTemplateSpecializationDecl::getSpecializedTemplate() const {
if (SpecializedPartialSpecialization *PartialSpec =
SpecializedTemplate.dyn_cast<SpecializedPartialSpecialization *>())
return PartialSpec->PartialSpecialization->getSpecializedTemplate();
return SpecializedTemplate.get<VarTemplateDecl *>();
}
void VarTemplateSpecializationDecl::setTemplateArgsInfo(
const TemplateArgumentListInfo &ArgsInfo) {
unsigned N = ArgsInfo.size();
TemplateArgsInfo.setLAngleLoc(ArgsInfo.getLAngleLoc());
TemplateArgsInfo.setRAngleLoc(ArgsInfo.getRAngleLoc());
for (unsigned I = 0; I != N; ++I)
TemplateArgsInfo.addArgument(ArgsInfo[I]);
}
//===----------------------------------------------------------------------===//
// VarTemplatePartialSpecializationDecl Implementation
//===----------------------------------------------------------------------===//
void VarTemplatePartialSpecializationDecl::anchor() {}
VarTemplatePartialSpecializationDecl::VarTemplatePartialSpecializationDecl(
ASTContext &Context, DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc, TemplateParameterList *Params,
VarTemplateDecl *SpecializedTemplate, QualType T, TypeSourceInfo *TInfo,
StorageClass S, const TemplateArgument *Args, unsigned NumArgs,
const ASTTemplateArgumentListInfo *ArgInfos)
: VarTemplateSpecializationDecl(VarTemplatePartialSpecialization, Context,
DC, StartLoc, IdLoc, SpecializedTemplate, T,
TInfo, S, Args, NumArgs),
TemplateParams(Params), ArgsAsWritten(ArgInfos),
InstantiatedFromMember(nullptr, false) {
// TODO: The template parameters should be in DC by now. Verify.
// AdoptTemplateParameterList(Params, DC);
}
VarTemplatePartialSpecializationDecl *
VarTemplatePartialSpecializationDecl::Create(
ASTContext &Context, DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc, TemplateParameterList *Params,
VarTemplateDecl *SpecializedTemplate, QualType T, TypeSourceInfo *TInfo,
StorageClass S, const TemplateArgument *Args, unsigned NumArgs,
const TemplateArgumentListInfo &ArgInfos) {
const ASTTemplateArgumentListInfo *ASTArgInfos
= ASTTemplateArgumentListInfo::Create(Context, ArgInfos);
VarTemplatePartialSpecializationDecl *Result =
new (Context, DC) VarTemplatePartialSpecializationDecl(
Context, DC, StartLoc, IdLoc, Params, SpecializedTemplate, T, TInfo,
S, Args, NumArgs, ASTArgInfos);
Result->setSpecializationKind(TSK_ExplicitSpecialization);
return Result;
}
VarTemplatePartialSpecializationDecl *
VarTemplatePartialSpecializationDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
return new (C, ID) VarTemplatePartialSpecializationDecl(C);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/TypeLoc.cpp | //===--- TypeLoc.cpp - Type Source Info Wrapper -----------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the TypeLoc subclasses implementations.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/TypeLoc.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
#include "clang/AST/TypeLocVisitor.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
static const unsigned TypeLocMaxDataAlign = llvm::alignOf<void *>();
//===----------------------------------------------------------------------===//
// TypeLoc Implementation
//===----------------------------------------------------------------------===//
namespace {
class TypeLocRanger : public TypeLocVisitor<TypeLocRanger, SourceRange> {
public:
#define ABSTRACT_TYPELOC(CLASS, PARENT)
#define TYPELOC(CLASS, PARENT) \
SourceRange Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc) { \
return TyLoc.getLocalSourceRange(); \
}
#include "clang/AST/TypeLocNodes.def"
};
}
SourceRange TypeLoc::getLocalSourceRangeImpl(TypeLoc TL) {
if (TL.isNull()) return SourceRange();
return TypeLocRanger().Visit(TL);
}
namespace {
class TypeAligner : public TypeLocVisitor<TypeAligner, unsigned> {
public:
#define ABSTRACT_TYPELOC(CLASS, PARENT)
#define TYPELOC(CLASS, PARENT) \
unsigned Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc) { \
return TyLoc.getLocalDataAlignment(); \
}
#include "clang/AST/TypeLocNodes.def"
};
}
/// \brief Returns the alignment of the type source info data block.
unsigned TypeLoc::getLocalAlignmentForType(QualType Ty) {
if (Ty.isNull()) return 1;
return TypeAligner().Visit(TypeLoc(Ty, nullptr));
}
namespace {
class TypeSizer : public TypeLocVisitor<TypeSizer, unsigned> {
public:
#define ABSTRACT_TYPELOC(CLASS, PARENT)
#define TYPELOC(CLASS, PARENT) \
unsigned Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc) { \
return TyLoc.getLocalDataSize(); \
}
#include "clang/AST/TypeLocNodes.def"
};
}
/// \brief Returns the size of the type source info data block.
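/// For example, for "int *" this walks a PointerTypeLoc and then a
/// BuiltinTypeLoc: each piece's size is added after rounding the running
/// total up to that piece's local alignment, and the final total is
/// rounded up to the largest alignment seen.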
unsigned TypeLoc::getFullDataSizeForType(QualType Ty) {
unsigned Total = 0;
TypeLoc TyLoc(Ty, nullptr);
unsigned MaxAlign = 1;
while (!TyLoc.isNull()) {
unsigned Align = getLocalAlignmentForType(TyLoc.getType());
MaxAlign = std::max(Align, MaxAlign);
Total = llvm::RoundUpToAlignment(Total, Align);
Total += TypeSizer().Visit(TyLoc);
TyLoc = TyLoc.getNextTypeLoc();
}
Total = llvm::RoundUpToAlignment(Total, MaxAlign);
return Total;
}
namespace {
class NextLoc : public TypeLocVisitor<NextLoc, TypeLoc> {
public:
#define ABSTRACT_TYPELOC(CLASS, PARENT)
#define TYPELOC(CLASS, PARENT) \
TypeLoc Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc) { \
return TyLoc.getNextTypeLoc(); \
}
#include "clang/AST/TypeLocNodes.def"
};
}
/// \brief Get the next TypeLoc pointed to by this TypeLoc, e.g. for "int*"
/// the TypeLoc is a PointerLoc and the next TypeLoc is for "int".
TypeLoc TypeLoc::getNextTypeLocImpl(TypeLoc TL) {
return NextLoc().Visit(TL);
}
/// \brief Initializes a type location, and all of its children
/// recursively, as if the entire tree had been written in the
/// given location.
void TypeLoc::initializeImpl(ASTContext &Context, TypeLoc TL,
SourceLocation Loc) {
while (true) {
switch (TL.getTypeLocClass()) {
#define ABSTRACT_TYPELOC(CLASS, PARENT)
#define TYPELOC(CLASS, PARENT) \
case CLASS: { \
CLASS##TypeLoc TLCasted = TL.castAs<CLASS##TypeLoc>(); \
TLCasted.initializeLocal(Context, Loc); \
TL = TLCasted.getNextTypeLoc(); \
if (!TL) return; \
continue; \
}
#include "clang/AST/TypeLocNodes.def"
}
}
}
namespace {
class TypeLocCopier : public TypeLocVisitor<TypeLocCopier> {
TypeLoc Source;
public:
TypeLocCopier(TypeLoc source) : Source(source) { }
#define ABSTRACT_TYPELOC(CLASS, PARENT)
#define TYPELOC(CLASS, PARENT) \
void Visit##CLASS##TypeLoc(CLASS##TypeLoc dest) { \
dest.copyLocal(Source.castAs<CLASS##TypeLoc>()); \
}
#include "clang/AST/TypeLocNodes.def"
};
}
void TypeLoc::copy(TypeLoc other) {
assert(getFullDataSize() == other.getFullDataSize());
// If both data pointers are aligned to the maximum alignment, we
// can memcpy because getFullDataSize() accurately reflects the
// layout of the data.
if (reinterpret_cast<uintptr_t>(Data)
== llvm::RoundUpToAlignment(reinterpret_cast<uintptr_t>(Data),
TypeLocMaxDataAlign) &&
reinterpret_cast<uintptr_t>(other.Data)
== llvm::RoundUpToAlignment(reinterpret_cast<uintptr_t>(other.Data),
TypeLocMaxDataAlign)) {
memcpy(Data, other.Data, getFullDataSize());
return;
}
// Copy each of the pieces.
TypeLoc TL(getType(), Data);
do {
TypeLocCopier(other).Visit(TL);
other = other.getNextTypeLoc();
} while ((TL = TL.getNextTypeLoc()));
}
SourceLocation TypeLoc::getBeginLoc() const {
TypeLoc Cur = *this;
TypeLoc LeftMost = Cur;
while (true) {
switch (Cur.getTypeLocClass()) {
case Elaborated:
LeftMost = Cur;
break;
case FunctionProto:
if (Cur.castAs<FunctionProtoTypeLoc>().getTypePtr()
->hasTrailingReturn()) {
LeftMost = Cur;
break;
}
LLVM_FALLTHROUGH; // HLSL Change
case FunctionNoProto:
case ConstantArray:
case DependentSizedArray:
case IncompleteArray:
case VariableArray:
// FIXME: Currently QualifiedTypeLoc does not have a source range
case Qualified:
Cur = Cur.getNextTypeLoc();
continue;
default:
if (!Cur.getLocalSourceRange().getBegin().isInvalid())
LeftMost = Cur;
Cur = Cur.getNextTypeLoc();
if (Cur.isNull())
break;
continue;
} // switch
break;
} // while
return LeftMost.getLocalSourceRange().getBegin();
}
SourceLocation TypeLoc::getEndLoc() const {
TypeLoc Cur = *this;
TypeLoc Last;
while (true) {
switch (Cur.getTypeLocClass()) {
default:
if (!Last)
Last = Cur;
return Last.getLocalSourceRange().getEnd();
case Paren:
case ConstantArray:
case DependentSizedArray:
case IncompleteArray:
case VariableArray:
case FunctionNoProto:
Last = Cur;
break;
case FunctionProto:
if (Cur.castAs<FunctionProtoTypeLoc>().getTypePtr()->hasTrailingReturn())
Last = TypeLoc();
else
Last = Cur;
break;
case Pointer:
case BlockPointer:
case MemberPointer:
case LValueReference:
case RValueReference:
case PackExpansion:
if (!Last)
Last = Cur;
break;
case Qualified:
case Elaborated:
break;
}
Cur = Cur.getNextTypeLoc();
}
}
namespace {
struct TSTChecker : public TypeLocVisitor<TSTChecker, bool> {
// Overload resolution does the real work for us.
static bool isTypeSpec(TypeSpecTypeLoc _) { return true; }
static bool isTypeSpec(TypeLoc _) { return false; }
#define ABSTRACT_TYPELOC(CLASS, PARENT)
#define TYPELOC(CLASS, PARENT) \
bool Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc) { \
return isTypeSpec(TyLoc); \
}
#include "clang/AST/TypeLocNodes.def"
};
}
/// \brief Determines if the given type loc corresponds to a
/// TypeSpecTypeLoc. Since there is not actually a TypeSpecType in
/// the type hierarchy, this is made somewhat complicated.
///
/// There are a lot of types that currently use TypeSpecTypeLoc
/// because it's a convenient base class. Ideally we would not accept
/// those here, but ideally we would have better implementations for
/// them.
bool TypeSpecTypeLoc::isKind(const TypeLoc &TL) {
if (TL.getType().hasLocalQualifiers()) return false;
return TSTChecker().Visit(TL);
}
// Reimplemented to account for GNU/C++ extension
// typeof unary-expression
// where there are no parentheses.
SourceRange TypeOfExprTypeLoc::getLocalSourceRange() const {
if (getRParenLoc().isValid())
return SourceRange(getTypeofLoc(), getRParenLoc());
else
return SourceRange(getTypeofLoc(),
getUnderlyingExpr()->getSourceRange().getEnd());
}
TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const {
if (needsExtraLocalData())
return static_cast<TypeSpecifierType>(getWrittenBuiltinSpecs().Type);
switch (getTypePtr()->getKind()) {
case BuiltinType::Void:
return TST_void;
case BuiltinType::Bool:
return TST_bool;
case BuiltinType::Char_U:
case BuiltinType::Char_S:
return TST_char;
case BuiltinType::Char16:
return TST_char16;
case BuiltinType::Char32:
return TST_char32;
case BuiltinType::WChar_S:
case BuiltinType::WChar_U:
return TST_wchar;
case BuiltinType::UChar:
case BuiltinType::UShort:
case BuiltinType::UInt:
case BuiltinType::ULong:
case BuiltinType::ULongLong:
case BuiltinType::UInt128:
case BuiltinType::SChar:
case BuiltinType::Short:
case BuiltinType::Int:
case BuiltinType::Long:
case BuiltinType::LongLong:
case BuiltinType::Int128:
case BuiltinType::Half:
case BuiltinType::Float:
case BuiltinType::Double:
case BuiltinType::LongDouble:
// HLSL Change Starts
case BuiltinType::HalfFloat:
case BuiltinType::Min10Float:
case BuiltinType::Min16Float:
case BuiltinType::Min16Int:
case BuiltinType::Min16UInt:
case BuiltinType::Min12Int:
case BuiltinType::LitFloat:
case BuiltinType::LitInt:
case BuiltinType::Int8_4Packed:
case BuiltinType::UInt8_4Packed:
// HLSL Change Ends
llvm_unreachable("Builtin type needs extra local data!");
// Fall through, if the impossible happens.
case BuiltinType::NullPtr:
case BuiltinType::Overload:
case BuiltinType::Dependent:
case BuiltinType::BoundMember:
case BuiltinType::UnknownAny:
case BuiltinType::ARCUnbridgedCast:
case BuiltinType::PseudoObject:
case BuiltinType::ObjCId:
case BuiltinType::ObjCClass:
case BuiltinType::ObjCSel:
case BuiltinType::OCLImage1d:
case BuiltinType::OCLImage1dArray:
case BuiltinType::OCLImage1dBuffer:
case BuiltinType::OCLImage2d:
case BuiltinType::OCLImage2dArray:
case BuiltinType::OCLImage3d:
case BuiltinType::OCLSampler:
case BuiltinType::OCLEvent:
case BuiltinType::BuiltinFn:
return TST_unspecified;
}
llvm_unreachable("Invalid BuiltinType Kind!");
}
TypeLoc TypeLoc::IgnoreParensImpl(TypeLoc TL) {
while (ParenTypeLoc PTL = TL.getAs<ParenTypeLoc>())
TL = PTL.getInnerLoc();
return TL;
}
SourceLocation TypeLoc::findNullabilityLoc() const {
if (auto attributedLoc = getAs<AttributedTypeLoc>()) {
if (attributedLoc.getAttrKind() == AttributedType::attr_nullable ||
attributedLoc.getAttrKind() == AttributedType::attr_nonnull ||
attributedLoc.getAttrKind() == AttributedType::attr_null_unspecified)
return attributedLoc.getAttrNameLoc();
}
return SourceLocation();
}
void ObjCObjectTypeLoc::initializeLocal(ASTContext &Context,
SourceLocation Loc) {
setHasBaseTypeAsWritten(true);
setTypeArgsLAngleLoc(Loc);
setTypeArgsRAngleLoc(Loc);
for (unsigned i = 0, e = getNumTypeArgs(); i != e; ++i) {
setTypeArgTInfo(i,
Context.getTrivialTypeSourceInfo(
getTypePtr()->getTypeArgsAsWritten()[i], Loc));
}
setProtocolLAngleLoc(Loc);
setProtocolRAngleLoc(Loc);
for (unsigned i = 0, e = getNumProtocols(); i != e; ++i)
setProtocolLoc(i, Loc);
}
void TypeOfTypeLoc::initializeLocal(ASTContext &Context,
SourceLocation Loc) {
TypeofLikeTypeLoc<TypeOfTypeLoc, TypeOfType, TypeOfTypeLocInfo>
::initializeLocal(Context, Loc);
this->getLocalData()->UnderlyingTInfo = Context.getTrivialTypeSourceInfo(
getUnderlyingType(), Loc);
}
void ElaboratedTypeLoc::initializeLocal(ASTContext &Context,
SourceLocation Loc) {
setElaboratedKeywordLoc(Loc);
NestedNameSpecifierLocBuilder Builder;
Builder.MakeTrivial(Context, getTypePtr()->getQualifier(), Loc);
setQualifierLoc(Builder.getWithLocInContext(Context));
}
void DependentNameTypeLoc::initializeLocal(ASTContext &Context,
SourceLocation Loc) {
setElaboratedKeywordLoc(Loc);
NestedNameSpecifierLocBuilder Builder;
Builder.MakeTrivial(Context, getTypePtr()->getQualifier(), Loc);
setQualifierLoc(Builder.getWithLocInContext(Context));
setNameLoc(Loc);
}
void
DependentTemplateSpecializationTypeLoc::initializeLocal(ASTContext &Context,
SourceLocation Loc) {
setElaboratedKeywordLoc(Loc);
if (getTypePtr()->getQualifier()) {
NestedNameSpecifierLocBuilder Builder;
Builder.MakeTrivial(Context, getTypePtr()->getQualifier(), Loc);
setQualifierLoc(Builder.getWithLocInContext(Context));
} else {
setQualifierLoc(NestedNameSpecifierLoc());
}
setTemplateKeywordLoc(Loc);
setTemplateNameLoc(Loc);
setLAngleLoc(Loc);
setRAngleLoc(Loc);
TemplateSpecializationTypeLoc::initializeArgLocs(Context, getNumArgs(),
getTypePtr()->getArgs(),
getArgInfos(), Loc);
}
void TemplateSpecializationTypeLoc::initializeArgLocs(ASTContext &Context,
unsigned NumArgs,
const TemplateArgument *Args,
TemplateArgumentLocInfo *ArgInfos,
SourceLocation Loc) {
for (unsigned i = 0, e = NumArgs; i != e; ++i) {
switch (Args[i].getKind()) {
case TemplateArgument::Null:
llvm_unreachable("Impossible TemplateArgument");
case TemplateArgument::Integral:
case TemplateArgument::Declaration:
case TemplateArgument::NullPtr:
ArgInfos[i] = TemplateArgumentLocInfo();
break;
case TemplateArgument::Expression:
ArgInfos[i] = TemplateArgumentLocInfo(Args[i].getAsExpr());
break;
case TemplateArgument::Type:
ArgInfos[i] = TemplateArgumentLocInfo(
Context.getTrivialTypeSourceInfo(Args[i].getAsType(),
Loc));
break;
case TemplateArgument::Template:
case TemplateArgument::TemplateExpansion: {
NestedNameSpecifierLocBuilder Builder;
TemplateName Template = Args[i].getAsTemplateOrTemplatePattern();
if (DependentTemplateName *DTN = Template.getAsDependentTemplateName())
Builder.MakeTrivial(Context, DTN->getQualifier(), Loc);
else if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
Builder.MakeTrivial(Context, QTN->getQualifier(), Loc);
ArgInfos[i] = TemplateArgumentLocInfo(
Builder.getWithLocInContext(Context), Loc,
Args[i].getKind() == TemplateArgument::Template ? SourceLocation()
: Loc);
break;
}
case TemplateArgument::Pack:
ArgInfos[i] = TemplateArgumentLocInfo();
break;
}
}
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/MicrosoftMangle.cpp | //===--- MicrosoftMangle.cpp - Microsoft Visual C++ Name Mangling ---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides C++ name mangling targeting the Microsoft Visual C++ ABI.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/Mangle.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/VTableBuilder.h"
#include "clang/Basic/ABI.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/MathExtras.h"
using namespace clang;
namespace {
/// \brief Retrieve the declaration context that should be used when mangling
/// the given declaration.
static const DeclContext *getEffectiveDeclContext(const Decl *D) {
// The ABI assumes that lambda closure types that occur within
// default arguments live in the context of the function. However, due to
// the way in which Clang parses and creates function declarations, this is
// not the case: the lambda closure type ends up living in the context
// where the function itself resides, because the function declaration itself
// had not yet been created. Fix the context here.
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
if (RD->isLambda())
if (ParmVarDecl *ContextParam =
dyn_cast_or_null<ParmVarDecl>(RD->getLambdaContextDecl()))
return ContextParam->getDeclContext();
}
// Perform the same check for block literals.
if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
if (ParmVarDecl *ContextParam =
dyn_cast_or_null<ParmVarDecl>(BD->getBlockManglingContextDecl()))
return ContextParam->getDeclContext();
}
const DeclContext *DC = D->getDeclContext();
if (const CapturedDecl *CD = dyn_cast<CapturedDecl>(DC))
return getEffectiveDeclContext(CD);
return DC;
}
static const DeclContext *getEffectiveParentContext(const DeclContext *DC) {
return getEffectiveDeclContext(cast<Decl>(DC));
}
static const FunctionDecl *getStructor(const NamedDecl *ND) {
if (const auto *FTD = dyn_cast<FunctionTemplateDecl>(ND))
return FTD->getTemplatedDecl();
const auto *FD = cast<FunctionDecl>(ND);
if (const auto *FTD = FD->getPrimaryTemplate())
return FTD->getTemplatedDecl();
return FD;
}
static bool isLambda(const NamedDecl *ND) {
const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(ND);
if (!Record)
return false;
return Record->isLambda();
}
/// MicrosoftMangleContextImpl - Overrides the default MangleContext for the
/// Microsoft Visual C++ ABI.
class MicrosoftMangleContextImpl : public MicrosoftMangleContext {
typedef std::pair<const DeclContext *, IdentifierInfo *> DiscriminatorKeyTy;
llvm::DenseMap<DiscriminatorKeyTy, unsigned> Discriminator;
llvm::DenseMap<const NamedDecl *, unsigned> Uniquifier;
llvm::DenseMap<const CXXRecordDecl *, unsigned> LambdaIds;
llvm::DenseMap<const NamedDecl *, unsigned> SEHFilterIds;
llvm::DenseMap<const NamedDecl *, unsigned> SEHFinallyIds;
public:
MicrosoftMangleContextImpl(ASTContext &Context, DiagnosticsEngine &Diags)
: MicrosoftMangleContext(Context, Diags) {}
bool shouldMangleCXXName(const NamedDecl *D) override;
bool shouldMangleStringLiteral(const StringLiteral *SL) override;
void mangleCXXName(const NamedDecl *D, raw_ostream &Out) override;
void mangleVirtualMemPtrThunk(const CXXMethodDecl *MD,
raw_ostream &) override;
void mangleThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk,
raw_ostream &) override;
void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
const ThisAdjustment &ThisAdjustment,
raw_ostream &) override;
void mangleCXXVFTable(const CXXRecordDecl *Derived,
ArrayRef<const CXXRecordDecl *> BasePath,
raw_ostream &Out) override;
void mangleCXXVBTable(const CXXRecordDecl *Derived,
ArrayRef<const CXXRecordDecl *> BasePath,
raw_ostream &Out) override;
void mangleCXXVirtualDisplacementMap(const CXXRecordDecl *SrcRD,
const CXXRecordDecl *DstRD,
raw_ostream &Out) override;
void mangleCXXThrowInfo(QualType T, bool IsConst, bool IsVolatile,
uint32_t NumEntries, raw_ostream &Out) override;
void mangleCXXCatchableTypeArray(QualType T, uint32_t NumEntries,
raw_ostream &Out) override;
void mangleCXXCatchableType(QualType T, const CXXConstructorDecl *CD,
CXXCtorType CT, uint32_t Size, uint32_t NVOffset,
int32_t VBPtrOffset, uint32_t VBIndex,
raw_ostream &Out) override;
void mangleCXXCatchHandlerType(QualType T, uint32_t Flags,
raw_ostream &Out) override;
void mangleCXXRTTI(QualType T, raw_ostream &Out) override;
void mangleCXXRTTIName(QualType T, raw_ostream &Out) override;
void mangleCXXRTTIBaseClassDescriptor(const CXXRecordDecl *Derived,
uint32_t NVOffset, int32_t VBPtrOffset,
uint32_t VBTableOffset, uint32_t Flags,
raw_ostream &Out) override;
void mangleCXXRTTIBaseClassArray(const CXXRecordDecl *Derived,
raw_ostream &Out) override;
void mangleCXXRTTIClassHierarchyDescriptor(const CXXRecordDecl *Derived,
raw_ostream &Out) override;
void
mangleCXXRTTICompleteObjectLocator(const CXXRecordDecl *Derived,
ArrayRef<const CXXRecordDecl *> BasePath,
raw_ostream &Out) override;
void mangleTypeName(QualType T, raw_ostream &) override;
void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
raw_ostream &) override;
void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
raw_ostream &) override;
void mangleReferenceTemporary(const VarDecl *, unsigned ManglingNumber,
raw_ostream &) override;
void mangleStaticGuardVariable(const VarDecl *D, raw_ostream &Out) override;
void mangleThreadSafeStaticGuardVariable(const VarDecl *D, unsigned GuardNum,
raw_ostream &Out) override;
void mangleDynamicInitializer(const VarDecl *D, raw_ostream &Out) override;
void mangleDynamicAtExitDestructor(const VarDecl *D,
raw_ostream &Out) override;
void mangleSEHFilterExpression(const NamedDecl *EnclosingDecl,
raw_ostream &Out) override;
void mangleSEHFinallyBlock(const NamedDecl *EnclosingDecl,
raw_ostream &Out) override;
void mangleStringLiteral(const StringLiteral *SL, raw_ostream &Out) override;
void mangleCXXVTableBitSet(const CXXRecordDecl *RD,
raw_ostream &Out) override;
bool getNextDiscriminator(const NamedDecl *ND, unsigned &disc) {
// Lambda closure types are already numbered.
if (isLambda(ND))
return false;
const DeclContext *DC = getEffectiveDeclContext(ND);
if (!DC->isFunctionOrMethod())
return false;
// Use the canonical number for externally visible decls.
if (ND->isExternallyVisible()) {
disc = getASTContext().getManglingNumber(ND);
return true;
}
// Anonymous tags are already numbered.
if (const TagDecl *Tag = dyn_cast<TagDecl>(ND)) {
if (Tag->getName().empty() && !Tag->getTypedefNameForAnonDecl())
return false;
}
// Make up a reasonable number for internal decls.
unsigned &discriminator = Uniquifier[ND];
if (!discriminator)
discriminator = ++Discriminator[std::make_pair(DC, ND->getIdentifier())];
disc = discriminator + 1;
return true;
}
unsigned getLambdaId(const CXXRecordDecl *RD) {
assert(RD->isLambda() && "RD must be a lambda!");
assert(!RD->isExternallyVisible() && "RD must not be visible!");
assert(RD->getLambdaManglingNumber() == 0 &&
"RD must not have a mangling number!");
std::pair<llvm::DenseMap<const CXXRecordDecl *, unsigned>::iterator, bool>
Result = LambdaIds.insert(std::make_pair(RD, LambdaIds.size()));
return Result.first->second;
}
private:
void mangleInitFiniStub(const VarDecl *D, raw_ostream &Out, char CharCode);
};
/// MicrosoftCXXNameMangler - Manage the mangling of a single name for the
/// Microsoft Visual C++ ABI.
class MicrosoftCXXNameMangler {
MicrosoftMangleContextImpl &Context;
raw_ostream &Out;
/// The "structor" is the top-level declaration being mangled, if
/// that's not a template specialization; otherwise it's the pattern
/// for that specialization.
const NamedDecl *Structor;
unsigned StructorType;
typedef llvm::SmallVector<std::string, 10> BackRefVec;
BackRefVec NameBackReferences;
typedef llvm::DenseMap<void *, unsigned> ArgBackRefMap;
ArgBackRefMap TypeBackReferences;
ASTContext &getASTContext() const { return Context.getASTContext(); }
// FIXME: If we add support for __ptr32/64 qualifiers, then we should push
// this check into mangleQualifiers().
const bool PointersAre64Bit;
public:
enum QualifierMangleMode { QMM_Drop, QMM_Mangle, QMM_Escape, QMM_Result };
MicrosoftCXXNameMangler(MicrosoftMangleContextImpl &C, raw_ostream &Out_)
: Context(C), Out(Out_), Structor(nullptr), StructorType(-1),
PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth(0) ==
64) {}
MicrosoftCXXNameMangler(MicrosoftMangleContextImpl &C, raw_ostream &Out_,
const CXXConstructorDecl *D, CXXCtorType Type)
: Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth(0) ==
64) {}
MicrosoftCXXNameMangler(MicrosoftMangleContextImpl &C, raw_ostream &Out_,
const CXXDestructorDecl *D, CXXDtorType Type)
: Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth(0) ==
64) {}
raw_ostream &getStream() const { return Out; }
void mangle(const NamedDecl *D, StringRef Prefix = "\01?");
void mangleName(const NamedDecl *ND);
void mangleFunctionEncoding(const FunctionDecl *FD, bool ShouldMangle);
void mangleVariableEncoding(const VarDecl *VD);
void mangleMemberDataPointer(const CXXRecordDecl *RD, const ValueDecl *VD);
void mangleMemberFunctionPointer(const CXXRecordDecl *RD,
const CXXMethodDecl *MD);
void mangleVirtualMemPtrThunk(
const CXXMethodDecl *MD,
const MicrosoftVTableContext::MethodVFTableLocation &ML);
void mangleNumber(int64_t Number);
void mangleType(QualType T, SourceRange Range,
QualifierMangleMode QMM = QMM_Mangle);
void mangleFunctionType(const FunctionType *T,
const FunctionDecl *D = nullptr,
bool ForceThisQuals = false);
void mangleNestedName(const NamedDecl *ND);
private:
void mangleUnqualifiedName(const NamedDecl *ND) {
mangleUnqualifiedName(ND, ND->getDeclName());
}
void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name);
void mangleSourceName(StringRef Name);
void mangleOperatorName(OverloadedOperatorKind OO, SourceLocation Loc);
void mangleCXXDtorType(CXXDtorType T);
void mangleQualifiers(Qualifiers Quals, bool IsMember);
void mangleRefQualifier(RefQualifierKind RefQualifier);
void manglePointerCVQualifiers(Qualifiers Quals);
void manglePointerExtQualifiers(Qualifiers Quals, QualType PointeeType);
void mangleUnscopedTemplateName(const TemplateDecl *ND);
void
mangleTemplateInstantiationName(const TemplateDecl *TD,
const TemplateArgumentList &TemplateArgs);
void mangleObjCMethodName(const ObjCMethodDecl *MD);
void mangleArgumentType(QualType T, SourceRange Range);
// Declare manglers for every type class.
#define ABSTRACT_TYPE(CLASS, PARENT)
#define NON_CANONICAL_TYPE(CLASS, PARENT)
#define TYPE(CLASS, PARENT) void mangleType(const CLASS##Type *T, \
Qualifiers Quals, \
SourceRange Range);
#include "clang/AST/TypeNodes.def"
#undef ABSTRACT_TYPE
#undef NON_CANONICAL_TYPE
#undef TYPE
void mangleType(const TagDecl *TD);
void mangleDecayedArrayType(const ArrayType *T);
void mangleArrayType(const ArrayType *T);
void mangleFunctionClass(const FunctionDecl *FD);
void mangleCallingConvention(CallingConv CC);
void mangleCallingConvention(const FunctionType *T);
void mangleIntegerLiteral(const llvm::APSInt &Number, bool IsBoolean);
void mangleExpression(const Expr *E);
void mangleThrowSpecification(const FunctionProtoType *T);
void mangleTemplateArgs(const TemplateDecl *TD,
const TemplateArgumentList &TemplateArgs);
void mangleTemplateArg(const TemplateDecl *TD, const TemplateArgument &TA,
const NamedDecl *Parm);
};
}
bool MicrosoftMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) {
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
LanguageLinkage L = FD->getLanguageLinkage();
// Overloadable functions need mangling.
if (FD->hasAttr<OverloadableAttr>())
return true;
// The ABI expects that we would never mangle "typical" user-defined entry
// points regardless of visibility or freestanding-ness.
//
// N.B. This is distinct from asking about "main". "main" has a lot of
// special rules associated with it in the standard while these
// user-defined entry points are outside of the purview of the standard.
// For example, there can be only one definition for "main" in a standards
// compliant program; however nothing forbids the existence of wmain and
// WinMain in the same translation unit.
if (FD->isMSVCRTEntryPoint())
return false;
// C++ functions and those whose names are not a simple identifier need
// mangling.
if (!FD->getDeclName().isIdentifier() || L == CXXLanguageLinkage)
return true;
// C functions are not mangled.
if (L == CLanguageLinkage)
return false;
}
// Otherwise, no mangling is done outside C++ mode.
if (!getASTContext().getLangOpts().CPlusPlus)
return false;
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
// C variables are not mangled.
if (VD->isExternC())
return false;
// Variables at global scope with non-internal linkage are not mangled.
const DeclContext *DC = getEffectiveDeclContext(D);
// Check for extern variable declared locally.
if (DC->isFunctionOrMethod() && D->hasLinkage())
while (!DC->isNamespace() && !DC->isTranslationUnit())
DC = getEffectiveParentContext(DC);
if (DC->isTranslationUnit() && D->getFormalLinkage() == InternalLinkage &&
!isa<VarTemplateSpecializationDecl>(D))
return false;
}
return true;
}
bool
MicrosoftMangleContextImpl::shouldMangleStringLiteral(const StringLiteral *SL) {
return true;
}
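// Documentation-only examples of the overall scheme implemented below,
// using well-known MSVC manglings:
//   void foo();  ->  "?foo@@YAXXZ"  (32-bit __cdecl)
//   int bar;     ->  "?bar@@3HA"
// The "\01" in the default prefix is an asm marker that keeps later passes
// from decorating the name further.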
void MicrosoftCXXNameMangler::mangle(const NamedDecl *D, StringRef Prefix) {
// MSVC doesn't mangle C++ names the same way it mangles extern "C" names.
// Therefore it's really important that we don't decorate the
// name with leading underscores or leading/trailing at signs. So, by
// default, we emit an asm marker at the start so we get the name right.
// Callers can override this with a custom prefix.
// <mangled-name> ::= ? <name> <type-encoding>
Out << Prefix;
mangleName(D);
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
mangleFunctionEncoding(FD, Context.shouldMangleDeclName(FD));
else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
mangleVariableEncoding(VD);
else {
// TODO: Fields? Can MSVC even mangle them?
// Issue a diagnostic for now.
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error, "cannot mangle this declaration yet");
Diags.Report(D->getLocation(), DiagID) << D->getSourceRange();
}
}
void MicrosoftCXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD,
bool ShouldMangle) {
// <type-encoding> ::= <function-class> <function-type>
// Since MSVC operates on the type as written and not the canonical type, it
// actually matters which decl we have here. MSVC appears to choose the
// first, since it is most likely to be the declaration in a header file.
FD = FD->getFirstDecl();
// We should never ever see a FunctionNoProtoType at this point.
// We don't even know how to mangle their types anyway :).
const FunctionProtoType *FT = FD->getType()->castAs<FunctionProtoType>();
// extern "C" functions can hold entities that must be mangled.
// As it stands, these functions still need to get expressed in the full
// external name. They have their class and type omitted, replaced with '9'.
if (ShouldMangle) {
// We would like to mangle all extern "C" functions using this additional
// component but this would break compatibility with MSVC's behavior.
// Instead, do this when we know that compatibility isn't important (in
// other words, when it is an overloaded extern "C" function).
if (FD->isExternC() && FD->hasAttr<OverloadableAttr>())
Out << "$$J0";
mangleFunctionClass(FD);
mangleFunctionType(FT, FD);
} else {
Out << '9';
}
}
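// Documentation-only examples of the <storage-class> encoding handled
// below (32-bit manglings):
//   int x;                      ->  "?x@@3HA"   ('3' global, 'H' int, 'A' unqualified)
//   struct S { static int x; } ->  "?x@S@@2HA"  ('2' public static member)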
void MicrosoftCXXNameMangler::mangleVariableEncoding(const VarDecl *VD) {
// <type-encoding> ::= <storage-class> <variable-type>
// <storage-class> ::= 0 # private static member
// ::= 1 # protected static member
// ::= 2 # public static member
// ::= 3 # global
// ::= 4 # static local
// The first character in the encoding (after the name) is the storage class.
if (VD->isStaticDataMember()) {
// If it's a static member, it also encodes the access level.
switch (VD->getAccess()) {
default:
case AS_private: Out << '0'; break;
case AS_protected: Out << '1'; break;
case AS_public: Out << '2'; break;
}
}
else if (!VD->isStaticLocal())
Out << '3';
else
Out << '4';
// Now mangle the type.
// <variable-type> ::= <type> <cvr-qualifiers>
// ::= <type> <pointee-cvr-qualifiers> # pointers, references
// Pointers and references are odd. The type of 'int * const foo;' gets
// mangled as 'QAHA' instead of 'PAHB', for example.
SourceRange SR = VD->getSourceRange();
QualType Ty = VD->getType();
if (Ty->isPointerType() || Ty->isReferenceType() ||
Ty->isMemberPointerType()) {
mangleType(Ty, SR, QMM_Drop);
manglePointerExtQualifiers(
Ty.getDesugaredType(getASTContext()).getLocalQualifiers(), QualType());
if (const MemberPointerType *MPT = Ty->getAs<MemberPointerType>()) {
mangleQualifiers(MPT->getPointeeType().getQualifiers(), true);
// Member pointers are suffixed with a back reference to the member
// pointer's class name.
mangleName(MPT->getClass()->getAsCXXRecordDecl());
} else
mangleQualifiers(Ty->getPointeeType().getQualifiers(), false);
} else if (const ArrayType *AT = getASTContext().getAsArrayType(Ty)) {
// Global arrays are funny, too.
mangleDecayedArrayType(AT);
if (AT->getElementType()->isArrayType())
Out << 'A';
else
mangleQualifiers(Ty.getQualifiers(), false);
} else {
mangleType(Ty, SR, QMM_Drop);
mangleQualifiers(Ty.getQualifiers(), false);
}
}
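// A sketch of the member-data-pointer encoding below, assuming single
// inheritance (Code '0'): a pointer to a member at byte offset 0 mangles
// as "$0A@" (mangleNumber(0) emits "A@"), and byte offset 4 as "$03"
// (mangleNumber(4) emits '3').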
void MicrosoftCXXNameMangler::mangleMemberDataPointer(const CXXRecordDecl *RD,
const ValueDecl *VD) {
// <member-data-pointer> ::= <integer-literal>
// ::= $F <number> <number>
// ::= $G <number> <number> <number>
int64_t FieldOffset;
int64_t VBTableOffset;
MSInheritanceAttr::Spelling IM = RD->getMSInheritanceModel();
if (VD) {
FieldOffset = getASTContext().getFieldOffset(VD);
assert(FieldOffset % getASTContext().getCharWidth() == 0 &&
"cannot take address of bitfield");
FieldOffset /= getASTContext().getCharWidth();
VBTableOffset = 0;
if (IM == MSInheritanceAttr::Keyword_virtual_inheritance)
FieldOffset -= getASTContext().getOffsetOfBaseWithVBPtr(RD).getQuantity();
} else {
FieldOffset = RD->nullFieldOffsetIsZero() ? 0 : -1;
VBTableOffset = -1;
}
char Code = '\0';
switch (IM) {
case MSInheritanceAttr::Keyword_single_inheritance: Code = '0'; break;
case MSInheritanceAttr::Keyword_multiple_inheritance: Code = '0'; break;
case MSInheritanceAttr::Keyword_virtual_inheritance: Code = 'F'; break;
case MSInheritanceAttr::Keyword_unspecified_inheritance: Code = 'G'; break;
}
Out << '$' << Code;
mangleNumber(FieldOffset);
// The C++ standard doesn't allow base-to-derived member pointer conversions
// in template parameter contexts, so the vbptr offset of data member pointers
// is always zero.
if (MSInheritanceAttr::hasVBPtrOffsetField(IM))
mangleNumber(0);
if (MSInheritanceAttr::hasVBTableOffsetField(IM))
mangleNumber(VBTableOffset);
}
void
MicrosoftCXXNameMangler::mangleMemberFunctionPointer(const CXXRecordDecl *RD,
const CXXMethodDecl *MD) {
// <member-function-pointer> ::= $1? <name>
// ::= $H? <name> <number>
// ::= $I? <name> <number> <number>
// ::= $J? <name> <number> <number> <number>
MSInheritanceAttr::Spelling IM = RD->getMSInheritanceModel();
char Code = '\0';
switch (IM) {
case MSInheritanceAttr::Keyword_single_inheritance: Code = '1'; break;
case MSInheritanceAttr::Keyword_multiple_inheritance: Code = 'H'; break;
case MSInheritanceAttr::Keyword_virtual_inheritance: Code = 'I'; break;
case MSInheritanceAttr::Keyword_unspecified_inheritance: Code = 'J'; break;
}
// If non-virtual, mangle the name. If virtual, mangle as a virtual memptr
// thunk.
uint64_t NVOffset = 0;
uint64_t VBTableOffset = 0;
uint64_t VBPtrOffset = 0;
if (MD) {
Out << '$' << Code << '?';
if (MD->isVirtual()) {
MicrosoftVTableContext *VTContext =
cast<MicrosoftVTableContext>(getASTContext().getVTableContext());
const MicrosoftVTableContext::MethodVFTableLocation &ML =
VTContext->getMethodVFTableLocation(GlobalDecl(MD));
mangleVirtualMemPtrThunk(MD, ML);
NVOffset = ML.VFPtrOffset.getQuantity();
VBTableOffset = ML.VBTableIndex * 4;
if (ML.VBase) {
const ASTRecordLayout &Layout = getASTContext().getASTRecordLayout(RD);
VBPtrOffset = Layout.getVBPtrOffset().getQuantity();
}
} else {
mangleName(MD);
mangleFunctionEncoding(MD, /*ShouldMangle=*/true);
}
if (VBTableOffset == 0 &&
IM == MSInheritanceAttr::Keyword_virtual_inheritance)
NVOffset -= getASTContext().getOffsetOfBaseWithVBPtr(RD).getQuantity();
} else {
// Null single inheritance member functions are encoded as a simple nullptr.
if (IM == MSInheritanceAttr::Keyword_single_inheritance) {
Out << "$0A@";
return;
}
if (IM == MSInheritanceAttr::Keyword_unspecified_inheritance)
VBTableOffset = -1;
Out << '$' << Code;
}
if (MSInheritanceAttr::hasNVOffsetField(/*IsMemberFunction=*/true, IM))
mangleNumber(static_cast<uint32_t>(NVOffset));
if (MSInheritanceAttr::hasVBPtrOffsetField(IM))
mangleNumber(VBPtrOffset);
if (MSInheritanceAttr::hasVBTableOffsetField(IM))
mangleNumber(VBTableOffset);
}
void MicrosoftCXXNameMangler::mangleVirtualMemPtrThunk(
const CXXMethodDecl *MD,
const MicrosoftVTableContext::MethodVFTableLocation &ML) {
// Get the vftable offset.
CharUnits PointerWidth = getASTContext().toCharUnitsFromBits(
getASTContext().getTargetInfo().getPointerWidth(0));
uint64_t OffsetInVFTable = ML.Index * PointerWidth.getQuantity();
Out << "?_9";
mangleName(MD->getParent());
Out << "$B";
mangleNumber(OffsetInVFTable);
Out << 'A';
mangleCallingConvention(MD->getType()->getAs<FunctionProtoType>());
}
void MicrosoftCXXNameMangler::mangleName(const NamedDecl *ND) {
// <name> ::= <unscoped-name> {[<named-scope>]+ | [<nested-name>]}? @
// Always start with the unqualified name.
mangleUnqualifiedName(ND);
mangleNestedName(ND);
// Terminate the whole name with an '@'.
Out << '@';
}
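// Examples of the <number> encoding implemented below:
//   0 -> "A@",  1 -> "0",  10 -> "9",
//   11 -> "L@" (one hex nibble, 0xB),  -3 -> "?2".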
void MicrosoftCXXNameMangler::mangleNumber(int64_t Number) {
// <non-negative integer> ::= A@ # when Number == 0
// ::= <decimal digit> # when 1 <= Number <= 10
// ::= <hex digit>+ @ # when Number >= 10
//
// <number> ::= [?] <non-negative integer>
uint64_t Value = static_cast<uint64_t>(Number);
if (Number < 0) {
Value = -Value;
Out << '?';
}
if (Value == 0)
Out << "A@";
else if (Value >= 1 && Value <= 10)
Out << (Value - 1);
else {
// Numbers that are not encoded as decimal digits are represented as nibbles
// in the range of ASCII characters 'A' to 'P'.
// The number 0x123450 would be encoded as 'BCDEFA'
char EncodedNumberBuffer[sizeof(uint64_t) * 2];
MutableArrayRef<char> BufferRef(EncodedNumberBuffer);
MutableArrayRef<char>::reverse_iterator I = BufferRef.rbegin();
for (; Value != 0; Value >>= 4)
*I++ = 'A' + (Value & 0xf);
Out.write(I.base(), I - BufferRef.rbegin());
Out << '@';
}
}
static const TemplateDecl *
isTemplate(const NamedDecl *ND, const TemplateArgumentList *&TemplateArgs) {
// Check if we have a function template.
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
if (const TemplateDecl *TD = FD->getPrimaryTemplate()) {
TemplateArgs = FD->getTemplateSpecializationArgs();
return TD;
}
}
// Check if we have a class template.
if (const ClassTemplateSpecializationDecl *Spec =
dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
TemplateArgs = &Spec->getTemplateArgs();
return Spec->getSpecializedTemplate();
}
// Check if we have a variable template.
if (const VarTemplateSpecializationDecl *Spec =
dyn_cast<VarTemplateSpecializationDecl>(ND)) {
TemplateArgs = &Spec->getTemplateArgs();
return Spec->getSpecializedTemplate();
}
return nullptr;
}
void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
DeclarationName Name) {
// <unqualified-name> ::= <operator-name>
// ::= <ctor-dtor-name>
// ::= <source-name>
// ::= <template-name>
// Check if we have a template.
const TemplateArgumentList *TemplateArgs = nullptr;
if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
// Function templates aren't considered for name back referencing. This
// makes sense since function templates aren't likely to occur multiple
// times in a symbol.
// FIXME: Test alias template mangling with MSVC 2013.
if (!isa<ClassTemplateDecl>(TD)) {
mangleTemplateInstantiationName(TD, *TemplateArgs);
Out << '@';
return;
}
// Here comes the tricky thing: if we need to mangle something like
// void foo(A::X<Y>, B::X<Y>),
// the X<Y> part is aliased. However, if you need to mangle
// void foo(A::X<A::Y>, A::X<B::Y>),
// the A::X<> part is not aliased.
// That said, from the mangler's perspective we have a structure like this:
// namespace[s] -> type[ -> template-parameters]
// but from the Clang perspective we have
// type [ -> template-parameters]
// \-> namespace[s]
// What we do is we create a new mangler, mangle the same type (without
// a namespace suffix) to a string using the extra mangler and then use
// the mangled type name as a key to check the mangling of different types
// for aliasing.
llvm::SmallString<64> TemplateMangling;
llvm::raw_svector_ostream Stream(TemplateMangling);
MicrosoftCXXNameMangler Extra(Context, Stream);
Extra.mangleTemplateInstantiationName(TD, *TemplateArgs);
Stream.flush();
mangleSourceName(TemplateMangling);
return;
}
switch (Name.getNameKind()) {
case DeclarationName::Identifier: {
if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) {
mangleSourceName(II->getName());
break;
}
// Otherwise, an anonymous entity. We must have a declaration.
assert(ND && "mangling empty name without declaration");
if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
if (NS->isAnonymousNamespace()) {
Out << "?A@";
break;
}
}
if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
// We must have an anonymous union or struct declaration.
const CXXRecordDecl *RD = VD->getType()->getAsCXXRecordDecl();
assert(RD && "expected variable decl to have a record type");
// Anonymous types with no tag or typedef get the name of their
// declarator mangled in. If they have no declarator, number them with
// a $S prefix.
llvm::SmallString<64> Name("$S");
// Get a unique id for the anonymous struct.
Name += llvm::utostr(Context.getAnonymousStructId(RD) + 1);
mangleSourceName(Name.str());
break;
}
// We must have an anonymous struct.
const TagDecl *TD = cast<TagDecl>(ND);
if (const TypedefNameDecl *D = TD->getTypedefNameForAnonDecl()) {
assert(TD->getDeclContext() == D->getDeclContext() &&
"Typedef should not be in another decl context!");
assert(D->getDeclName().getAsIdentifierInfo() &&
"Typedef was not named!");
mangleSourceName(D->getDeclName().getAsIdentifierInfo()->getName());
break;
}
if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(TD)) {
if (Record->isLambda()) {
llvm::SmallString<10> Name("<lambda_");
unsigned LambdaId;
if (Record->getLambdaManglingNumber())
LambdaId = Record->getLambdaManglingNumber();
else
LambdaId = Context.getLambdaId(Record);
Name += llvm::utostr(LambdaId);
Name += ">";
mangleSourceName(Name);
break;
}
}
llvm::SmallString<64> Name("<unnamed-type-");
if (TD->hasDeclaratorForAnonDecl()) {
// Anonymous types with no tag or typedef get the name of their
// declarator mangled in if they have one.
Name += TD->getDeclaratorForAnonDecl()->getName();
} else {
// Otherwise, number the types using a $S prefix.
Name += "$S";
Name += llvm::utostr(Context.getAnonymousStructId(TD));
}
Name += ">";
mangleSourceName(Name.str());
break;
}
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCMultiArgSelector:
llvm_unreachable("Can't mangle Objective-C selector names here!");
case DeclarationName::CXXConstructorName:
if (Structor == getStructor(ND)) {
if (StructorType == Ctor_CopyingClosure) {
Out << "?_O";
return;
}
if (StructorType == Ctor_DefaultClosure) {
Out << "?_F";
return;
}
}
Out << "?0";
return;
case DeclarationName::CXXDestructorName:
if (ND == Structor)
// If the named decl is the C++ destructor we're mangling,
// use the type we were given.
mangleCXXDtorType(static_cast<CXXDtorType>(StructorType));
else
// Otherwise, use the base destructor name. This is relevant if a
// class with a destructor is declared within a destructor.
mangleCXXDtorType(Dtor_Base);
break;
case DeclarationName::CXXConversionFunctionName:
// <operator-name> ::= ?B # (cast)
// The target type is encoded as the return type.
Out << "?B";
break;
case DeclarationName::CXXOperatorName:
mangleOperatorName(Name.getCXXOverloadedOperator(), ND->getLocation());
break;
case DeclarationName::CXXLiteralOperatorName: {
Out << "?__K";
mangleSourceName(Name.getCXXLiteralIdentifier()->getName());
break;
}
case DeclarationName::CXXUsingDirective:
llvm_unreachable("Can't mangle a using directive name!");
}
}
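// Example of the <postfix> walk below: for "namespace N { int x; }" the
// unqualified "x@" has already been emitted, and this routine appends
// "N@", yielding "?x@N@@3HA" once the variable encoding follows.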
void MicrosoftCXXNameMangler::mangleNestedName(const NamedDecl *ND) {
// <postfix> ::= <unqualified-name> [<postfix>]
// ::= <substitution> [<postfix>]
const DeclContext *DC = getEffectiveDeclContext(ND);
while (!DC->isTranslationUnit()) {
if (isa<TagDecl>(ND) || isa<VarDecl>(ND)) {
unsigned Disc;
if (Context.getNextDiscriminator(ND, Disc)) {
Out << '?';
mangleNumber(Disc);
Out << '?';
}
}
if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC)) {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID =
Diags.getCustomDiagID(DiagnosticsEngine::Error,
"cannot mangle a local inside this block yet");
Diags.Report(BD->getLocation(), DiagID);
// FIXME: This is completely, utterly, wrong; see ItaniumMangle
// for how this should be done.
Out << "__block_invoke" << Context.getBlockId(BD, false);
Out << '@';
continue;
} else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC)) {
mangleObjCMethodName(Method);
} else if (isa<NamedDecl>(DC)) {
ND = cast<NamedDecl>(DC);
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
mangle(FD, "?");
break;
} else
mangleUnqualifiedName(ND);
}
DC = DC->getParent();
}
}
void MicrosoftCXXNameMangler::mangleCXXDtorType(CXXDtorType T) {
// Microsoft uses the names on the case labels for these dtor variants. Clang
// uses the Itanium terminology internally. Everything in this ABI delegates
// towards the base dtor.
switch (T) {
// <operator-name> ::= ?1 # destructor
case Dtor_Base: Out << "?1"; return;
// <operator-name> ::= ?_D # vbase destructor
case Dtor_Complete: Out << "?_D"; return;
// <operator-name> ::= ?_G # scalar deleting destructor
case Dtor_Deleting: Out << "?_G"; return;
// <operator-name> ::= ?_E # vector deleting destructor
// FIXME: Add a vector deleting dtor type. It goes in the vtable, so we need
// it.
case Dtor_Comdat:
llvm_unreachable("not expecting a COMDAT");
}
llvm_unreachable("Unsupported dtor type?");
}
void MicrosoftCXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO,
SourceLocation Loc) {
switch (OO) {
// ?0 # constructor
// ?1 # destructor
// <operator-name> ::= ?2 # new
case OO_New: Out << "?2"; break;
// <operator-name> ::= ?3 # delete
case OO_Delete: Out << "?3"; break;
// <operator-name> ::= ?4 # =
case OO_Equal: Out << "?4"; break;
// <operator-name> ::= ?5 # >>
case OO_GreaterGreater: Out << "?5"; break;
// <operator-name> ::= ?6 # <<
case OO_LessLess: Out << "?6"; break;
// <operator-name> ::= ?7 # !
case OO_Exclaim: Out << "?7"; break;
// <operator-name> ::= ?8 # ==
case OO_EqualEqual: Out << "?8"; break;
// <operator-name> ::= ?9 # !=
case OO_ExclaimEqual: Out << "?9"; break;
// <operator-name> ::= ?A # []
case OO_Subscript: Out << "?A"; break;
// ?B # conversion
// <operator-name> ::= ?C # ->
case OO_Arrow: Out << "?C"; break;
// <operator-name> ::= ?D # *
case OO_Star: Out << "?D"; break;
// <operator-name> ::= ?E # ++
case OO_PlusPlus: Out << "?E"; break;
// <operator-name> ::= ?F # --
case OO_MinusMinus: Out << "?F"; break;
// <operator-name> ::= ?G # -
case OO_Minus: Out << "?G"; break;
// <operator-name> ::= ?H # +
case OO_Plus: Out << "?H"; break;
// <operator-name> ::= ?I # &
case OO_Amp: Out << "?I"; break;
// <operator-name> ::= ?J # ->*
case OO_ArrowStar: Out << "?J"; break;
// <operator-name> ::= ?K # /
case OO_Slash: Out << "?K"; break;
// <operator-name> ::= ?L # %
case OO_Percent: Out << "?L"; break;
// <operator-name> ::= ?M # <
case OO_Less: Out << "?M"; break;
// <operator-name> ::= ?N # <=
case OO_LessEqual: Out << "?N"; break;
// <operator-name> ::= ?O # >
case OO_Greater: Out << "?O"; break;
// <operator-name> ::= ?P # >=
case OO_GreaterEqual: Out << "?P"; break;
// <operator-name> ::= ?Q # ,
case OO_Comma: Out << "?Q"; break;
// <operator-name> ::= ?R # ()
case OO_Call: Out << "?R"; break;
// <operator-name> ::= ?S # ~
case OO_Tilde: Out << "?S"; break;
// <operator-name> ::= ?T # ^
case OO_Caret: Out << "?T"; break;
// <operator-name> ::= ?U # |
case OO_Pipe: Out << "?U"; break;
// <operator-name> ::= ?V # &&
case OO_AmpAmp: Out << "?V"; break;
// <operator-name> ::= ?W # ||
case OO_PipePipe: Out << "?W"; break;
// <operator-name> ::= ?X # *=
case OO_StarEqual: Out << "?X"; break;
// <operator-name> ::= ?Y # +=
case OO_PlusEqual: Out << "?Y"; break;
// <operator-name> ::= ?Z # -=
case OO_MinusEqual: Out << "?Z"; break;
// <operator-name> ::= ?_0 # /=
case OO_SlashEqual: Out << "?_0"; break;
// <operator-name> ::= ?_1 # %=
case OO_PercentEqual: Out << "?_1"; break;
// <operator-name> ::= ?_2 # >>=
case OO_GreaterGreaterEqual: Out << "?_2"; break;
// <operator-name> ::= ?_3 # <<=
case OO_LessLessEqual: Out << "?_3"; break;
// <operator-name> ::= ?_4 # &=
case OO_AmpEqual: Out << "?_4"; break;
// <operator-name> ::= ?_5 # |=
case OO_PipeEqual: Out << "?_5"; break;
// <operator-name> ::= ?_6 # ^=
case OO_CaretEqual: Out << "?_6"; break;
// ?_7 # vftable
// ?_8 # vbtable
// ?_9 # vcall
// ?_A # typeof
// ?_B # local static guard
// ?_C # string
// ?_D # vbase destructor
// ?_E # vector deleting destructor
// ?_F # default constructor closure
// ?_G # scalar deleting destructor
// ?_H # vector constructor iterator
// ?_I # vector destructor iterator
// ?_J # vector vbase constructor iterator
// ?_K # virtual displacement map
// ?_L # eh vector constructor iterator
// ?_M # eh vector destructor iterator
// ?_N # eh vector vbase constructor iterator
// ?_O # copy constructor closure
// ?_P<name> # udt returning <name>
// ?_Q # <unknown>
// ?_R0 # RTTI Type Descriptor
// ?_R1 # RTTI Base Class Descriptor at (a,b,c,d)
// ?_R2 # RTTI Base Class Array
// ?_R3 # RTTI Class Hierarchy Descriptor
// ?_R4 # RTTI Complete Object Locator
// ?_S # local vftable
// ?_T # local vftable constructor closure
// <operator-name> ::= ?_U # new[]
case OO_Array_New: Out << "?_U"; break;
// <operator-name> ::= ?_V # delete[]
case OO_Array_Delete: Out << "?_V"; break;
case OO_Conditional: {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"cannot mangle this conditional operator yet");
Diags.Report(Loc, DiagID);
break;
}
case OO_None:
case NUM_OVERLOADED_OPERATORS:
llvm_unreachable("Not an overloaded operator");
}
}
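// Back-reference sketch for the <source name> logic below: the first ten
// distinct names are remembered as they are emitted, so a scope chain that
// repeats "Foo" mangles the second occurrence as just its slot digit
// (e.g. '0' if "Foo@" was the first name emitted).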
void MicrosoftCXXNameMangler::mangleSourceName(StringRef Name) {
// <source name> ::= <identifier> @
BackRefVec::iterator Found =
std::find(NameBackReferences.begin(), NameBackReferences.end(), Name);
if (Found == NameBackReferences.end()) {
if (NameBackReferences.size() < 10)
NameBackReferences.push_back(Name);
Out << Name << '@';
} else {
Out << (Found - NameBackReferences.begin());
}
}
void MicrosoftCXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
Context.mangleObjCMethodName(MD, Out);
}
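// Documentation-only sketch: for an instantiation like Foo<int> this emits
// "?$Foo@H" -- the "?$" marker, the source name "Foo@", then 'H' for the
// int argument -- with the terminating '@' added by the caller.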
void MicrosoftCXXNameMangler::mangleTemplateInstantiationName(
const TemplateDecl *TD, const TemplateArgumentList &TemplateArgs) {
// <template-name> ::= <unscoped-template-name> <template-args>
// ::= <substitution>
// Always start with the unqualified name.
// Templates have their own context for back references.
ArgBackRefMap OuterArgsContext;
BackRefVec OuterTemplateContext;
NameBackReferences.swap(OuterTemplateContext);
TypeBackReferences.swap(OuterArgsContext);
mangleUnscopedTemplateName(TD);
mangleTemplateArgs(TD, TemplateArgs);
// Restore the previous back reference contexts.
NameBackReferences.swap(OuterTemplateContext);
TypeBackReferences.swap(OuterArgsContext);
}
void
MicrosoftCXXNameMangler::mangleUnscopedTemplateName(const TemplateDecl *TD) {
// <unscoped-template-name> ::= ?$ <unqualified-name>
Out << "?$";
mangleUnqualifiedName(TD);
}
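// Examples of the <integer-literal> encoding below: the value 0 mangles
// as "$0A@", 5 as "$04", and boolean true as "$00" (booleans are forced
// to 0/1 before encoding).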
void MicrosoftCXXNameMangler::mangleIntegerLiteral(const llvm::APSInt &Value,
bool IsBoolean) {
// <integer-literal> ::= $0 <number>
Out << "$0";
// Make sure booleans are encoded as 0/1.
if (IsBoolean && Value.getBoolValue())
mangleNumber(1);
else if (Value.isSigned())
mangleNumber(Value.getSExtValue());
else
mangleNumber(Value.getZExtValue());
}
void MicrosoftCXXNameMangler::mangleExpression(const Expr *E) {
// See if this is a constant expression.
llvm::APSInt Value;
if (E->isIntegerConstantExpr(Value, Context.getASTContext())) {
mangleIntegerLiteral(Value, E->getType()->isBooleanType());
return;
}
// Look through no-op casts like template parameter substitutions.
E = E->IgnoreParenNoopCasts(Context.getASTContext());
const CXXUuidofExpr *UE = nullptr;
if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
if (UO->getOpcode() == UO_AddrOf)
UE = dyn_cast<CXXUuidofExpr>(UO->getSubExpr());
} else
UE = dyn_cast<CXXUuidofExpr>(E);
if (UE) {
// This CXXUuidofExpr is mangled as-if it were actually a VarDecl from
// const __s_GUID _GUID_{lower case UUID with underscores}
StringRef Uuid = UE->getUuidAsStringRef(Context.getASTContext());
std::string Name = "_GUID_" + Uuid.lower();
std::replace(Name.begin(), Name.end(), '-', '_');
// If we had to peek through an address-of operator, treat this like we are
// dealing with a pointer type. Otherwise, treat it like a const reference.
//
// N.B. This matches up with the handling of TemplateArgument::Declaration
// in mangleTemplateArg
if (UE == E)
Out << "$E?";
else
Out << "$1?";
Out << Name << "@@3U__s_GUID@@B";
return;
}
// As bad as this diagnostic is, it's better than crashing.
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error, "cannot yet mangle expression type %0");
Diags.Report(E->getExprLoc(), DiagID) << E->getStmtClassName()
<< E->getSourceRange();
}
void MicrosoftCXXNameMangler::mangleTemplateArgs(
const TemplateDecl *TD, const TemplateArgumentList &TemplateArgs) {
// <template-args> ::= <template-arg>+
const TemplateParameterList *TPL = TD->getTemplateParameters();
assert(TPL->size() == TemplateArgs.size() &&
"size mismatch between args and parms!");
unsigned Idx = 0;
for (const TemplateArgument &TA : TemplateArgs.asArray())
mangleTemplateArg(TD, TA, TPL->getParam(Idx++));
}
void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
const TemplateArgument &TA,
const NamedDecl *Parm) {
// <template-arg> ::= <type>
// ::= <integer-literal>
// ::= <member-data-pointer>
// ::= <member-function-pointer>
// ::= $E? <name> <type-encoding>
// ::= $1? <name> <type-encoding>
// ::= $0A@
// ::= <template-args>
switch (TA.getKind()) {
case TemplateArgument::Null:
llvm_unreachable("Can't mangle null template arguments!");
case TemplateArgument::TemplateExpansion:
llvm_unreachable("Can't mangle template expansion arguments!");
case TemplateArgument::Type: {
QualType T = TA.getAsType();
mangleType(T, SourceRange(), QMM_Escape);
break;
}
case TemplateArgument::Declaration: {
const NamedDecl *ND = cast<NamedDecl>(TA.getAsDecl());
if (isa<FieldDecl>(ND) || isa<IndirectFieldDecl>(ND)) {
mangleMemberDataPointer(
cast<CXXRecordDecl>(ND->getDeclContext())->getMostRecentDecl(),
cast<ValueDecl>(ND));
} else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
if (MD && MD->isInstance()) {
mangleMemberFunctionPointer(MD->getParent()->getMostRecentDecl(), MD);
} else {
Out << "$1?";
mangleName(FD);
mangleFunctionEncoding(FD, /*ShouldMangle=*/true);
}
} else {
mangle(ND, TA.getParamTypeForDecl()->isReferenceType() ? "$E?" : "$1?");
}
break;
}
case TemplateArgument::Integral:
mangleIntegerLiteral(TA.getAsIntegral(),
TA.getIntegralType()->isBooleanType());
break;
case TemplateArgument::NullPtr: {
QualType T = TA.getNullPtrType();
if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) {
const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
if (MPT->isMemberFunctionPointerType() && isa<ClassTemplateDecl>(TD)) {
mangleMemberFunctionPointer(RD, nullptr);
return;
}
if (MPT->isMemberDataPointer()) {
if (isa<ClassTemplateDecl>(TD)) {
mangleMemberDataPointer(RD, nullptr);
return;
}
// nullptr data pointers are always represented with a single field
// which is initialized with either 0 or -1. Why -1? Well, we need to
// distinguish the case where the data member is at offset zero in the
// record.
// However, we are free to use 0 *if* we would use multiple fields for
// non-nullptr member pointers.
if (!RD->nullFieldOffsetIsZero()) {
mangleIntegerLiteral(llvm::APSInt::get(-1), /*IsBoolean=*/false);
return;
}
}
}
mangleIntegerLiteral(llvm::APSInt::getUnsigned(0), /*IsBoolean=*/false);
break;
}
case TemplateArgument::Expression:
mangleExpression(TA.getAsExpr());
break;
case TemplateArgument::Pack: {
ArrayRef<TemplateArgument> TemplateArgs = TA.getPackAsArray();
if (TemplateArgs.empty()) {
if (isa<TemplateTypeParmDecl>(Parm) ||
isa<TemplateTemplateParmDecl>(Parm))
// MSVC 2015 changed the mangling for empty expanded template packs,
// use the old mangling for link compatibility for old versions.
Out << (Context.getASTContext().getLangOpts().isCompatibleWithMSVC(
LangOptions::MSVC2015)
? "$$V"
: "$$$V");
else if (isa<NonTypeTemplateParmDecl>(Parm))
Out << "$S";
else
llvm_unreachable("unexpected template parameter decl!");
} else {
for (const TemplateArgument &PA : TemplateArgs)
mangleTemplateArg(TD, PA, Parm);
}
break;
}
case TemplateArgument::Template: {
const NamedDecl *ND =
TA.getAsTemplate().getAsTemplateDecl()->getTemplatedDecl();
if (const auto *TD = dyn_cast<TagDecl>(ND)) {
mangleType(TD);
} else if (isa<TypeAliasDecl>(ND)) {
Out << "$$Y";
mangleName(ND);
} else {
llvm_unreachable("unexpected template template NamedDecl!");
}
break;
}
}
}
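// Quick reference for the common cases handled below: non-member
// (none)/const/volatile/const volatile map to 'A'/'B'/'C'/'D', and the
// corresponding member qualifiers map to 'Q'/'R'/'S'/'T'.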
void MicrosoftCXXNameMangler::mangleQualifiers(Qualifiers Quals,
bool IsMember) {
// <cvr-qualifiers> ::= [E] [F] [I] <base-cvr-qualifiers>
// 'E' means __ptr64 (32-bit only); 'F' means __unaligned (32/64-bit only);
// 'I' means __restrict (32/64-bit).
// Note that the MSVC __restrict keyword isn't the same as the C99 restrict
// keyword!
// <base-cvr-qualifiers> ::= A # near
// ::= B # near const
// ::= C # near volatile
// ::= D # near const volatile
// ::= E # far (16-bit)
// ::= F # far const (16-bit)
// ::= G # far volatile (16-bit)
// ::= H # far const volatile (16-bit)
// ::= I # huge (16-bit)
// ::= J # huge const (16-bit)
// ::= K # huge volatile (16-bit)
// ::= L # huge const volatile (16-bit)
// ::= M <basis> # based
// ::= N <basis> # based const
// ::= O <basis> # based volatile
// ::= P <basis> # based const volatile
// ::= Q # near member
// ::= R # near const member
// ::= S # near volatile member
// ::= T # near const volatile member
// ::= U # far member (16-bit)
// ::= V # far const member (16-bit)
// ::= W # far volatile member (16-bit)
// ::= X # far const volatile member (16-bit)
// ::= Y # huge member (16-bit)
// ::= Z # huge const member (16-bit)
// ::= 0 # huge volatile member (16-bit)
// ::= 1 # huge const volatile member (16-bit)
// ::= 2 <basis> # based member
// ::= 3 <basis> # based const member
// ::= 4 <basis> # based volatile member
// ::= 5 <basis> # based const volatile member
// ::= 6 # near function (pointers only)
// ::= 7 # far function (pointers only)
// ::= 8 # near method (pointers only)
// ::= 9 # far method (pointers only)
// ::= _A <basis> # based function (pointers only)
// ::= _B <basis> # based function (far?) (pointers only)
// ::= _C <basis> # based method (pointers only)
// ::= _D <basis> # based method (far?) (pointers only)
// ::= _E # block (Clang)
// <basis> ::= 0 # __based(void)
// ::= 1 # __based(segment)?
// ::= 2 <name> # __based(name)
// ::= 3 # ?
// ::= 4 # ?
// ::= 5 # not really based
bool HasConst = Quals.hasConst(),
HasVolatile = Quals.hasVolatile();
if (!IsMember) {
if (HasConst && HasVolatile) {
Out << 'D';
} else if (HasVolatile) {
Out << 'C';
} else if (HasConst) {
Out << 'B';
} else {
Out << 'A';
}
} else {
if (HasConst && HasVolatile) {
Out << 'T';
} else if (HasVolatile) {
Out << 'S';
} else if (HasConst) {
Out << 'R';
} else {
Out << 'Q';
}
}
// FIXME: For now, just drop all extension qualifiers on the floor.
}
void
MicrosoftCXXNameMangler::mangleRefQualifier(RefQualifierKind RefQualifier) {
// <ref-qualifier> ::= G # lvalue reference
// ::= H # rvalue-reference
switch (RefQualifier) {
case RQ_None:
break;
case RQ_LValue:
Out << 'G';
break;
case RQ_RValue:
Out << 'H';
break;
}
}
void MicrosoftCXXNameMangler::manglePointerExtQualifiers(Qualifiers Quals,
QualType PointeeType) {
bool HasRestrict = Quals.hasRestrict();
if (PointersAre64Bit &&
(PointeeType.isNull() || !PointeeType->isFunctionType()))
Out << 'E';
if (HasRestrict)
Out << 'I';
}
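// Example (illustrative): on x86-64 a plain 'int *' parameter mangles as
// "PEAH"; the 'E' is the __ptr64 marker emitted above, which x86 omits,
// giving "PAH".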
void MicrosoftCXXNameMangler::manglePointerCVQualifiers(Qualifiers Quals) {
// <pointer-cv-qualifiers> ::= P # no qualifiers
// ::= Q # const
// ::= R # volatile
// ::= S # const volatile
bool HasConst = Quals.hasConst(),
HasVolatile = Quals.hasVolatile();
if (HasConst && HasVolatile) {
Out << 'S';
} else if (HasVolatile) {
Out << 'R';
} else if (HasConst) {
Out << 'Q';
} else {
Out << 'P';
}
}
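// Example (illustrative): these letters qualify the pointer itself, not the
// pointee, so 'int *const' is "QAH" while 'const int *' is "PBH" (x86; on
// x86-64 an 'E' follows the pointer letter, e.g. "QEAH").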
void MicrosoftCXXNameMangler::mangleArgumentType(QualType T,
SourceRange Range) {
// MSVC will backreference two canonically equivalent types that have slightly
// different manglings when mangled alone.
// Decayed types do not match up with non-decayed versions of the same type.
//
// e.g.
// void (*x)(void) will not form a backreference with void x(void)
void *TypePtr;
if (const auto *DT = T->getAs<DecayedType>()) {
QualType OriginalType = DT->getOriginalType();
    // All decayed ArrayTypes should be treated identically, as if they were
    // a decayed IncompleteArrayType.
if (const auto *AT = getASTContext().getAsArrayType(OriginalType))
OriginalType = getASTContext().getIncompleteArrayType(
AT->getElementType(), AT->getSizeModifier(),
AT->getIndexTypeCVRQualifiers());
TypePtr = OriginalType.getCanonicalType().getAsOpaquePtr();
// If the original parameter was textually written as an array,
// instead treat the decayed parameter like it's const.
//
// e.g.
// int [] -> int * const
if (OriginalType->isArrayType())
T = T.withConst();
} else {
TypePtr = T.getCanonicalType().getAsOpaquePtr();
}
ArgBackRefMap::iterator Found = TypeBackReferences.find(TypePtr);
if (Found == TypeBackReferences.end()) {
size_t OutSizeBefore = Out.tell();
mangleType(T, Range, QMM_Drop);
// See if it's worth creating a back reference.
    // Only types longer than one character are considered,
    // and only 10 back-reference slots are available:
bool LongerThanOneChar = (Out.tell() - OutSizeBefore > 1);
if (LongerThanOneChar && TypeBackReferences.size() < 10) {
size_t Size = TypeBackReferences.size();
TypeBackReferences[TypePtr] = Size;
}
} else {
Out << Found->second;
}
}
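// Example (illustrative): given 'struct S; void f(S, S);', the first
// parameter mangles in full as "US@@" and the second collapses to the back
// reference "0", yielding "?f@@YAXUS@@0@Z". One-character builtins such as
// 'H' (int) never earn a slot.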
void MicrosoftCXXNameMangler::mangleType(QualType T, SourceRange Range,
QualifierMangleMode QMM) {
// Don't use the canonical types. MSVC includes things like 'const' on
// pointer arguments to function pointers that canonicalization strips away.
T = T.getDesugaredType(getASTContext());
Qualifiers Quals = T.getLocalQualifiers();
if (const ArrayType *AT = getASTContext().getAsArrayType(T)) {
// If there were any Quals, getAsArrayType() pushed them onto the array
// element type.
if (QMM == QMM_Mangle)
Out << 'A';
else if (QMM == QMM_Escape || QMM == QMM_Result)
Out << "$$B";
mangleArrayType(AT);
return;
}
bool IsPointer = T->isAnyPointerType() || T->isMemberPointerType() ||
T->isReferenceType() || T->isBlockPointerType();
switch (QMM) {
case QMM_Drop:
break;
case QMM_Mangle:
if (const FunctionType *FT = dyn_cast<FunctionType>(T)) {
Out << '6';
mangleFunctionType(FT);
return;
}
mangleQualifiers(Quals, false);
break;
case QMM_Escape:
if (!IsPointer && Quals) {
Out << "$$C";
mangleQualifiers(Quals, false);
}
break;
case QMM_Result:
if ((!IsPointer && Quals) || isa<TagType>(T)) {
Out << '?';
mangleQualifiers(Quals, false);
}
break;
}
const Type *ty = T.getTypePtr();
switch (ty->getTypeClass()) {
#define ABSTRACT_TYPE(CLASS, PARENT)
#define NON_CANONICAL_TYPE(CLASS, PARENT) \
case Type::CLASS: \
llvm_unreachable("can't mangle non-canonical type " #CLASS "Type"); \
return;
#define TYPE(CLASS, PARENT) \
case Type::CLASS: \
mangleType(cast<CLASS##Type>(ty), Quals, Range); \
break;
#include "clang/AST/TypeNodes.def"
#undef ABSTRACT_TYPE
#undef NON_CANONICAL_TYPE
#undef TYPE
}
}
void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers,
SourceRange Range) {
// <type> ::= <builtin-type>
// <builtin-type> ::= X # void
// ::= C # signed char
// ::= D # char
// ::= E # unsigned char
// ::= F # short
// ::= G # unsigned short (or wchar_t if it's not a builtin)
// ::= H # int
// ::= I # unsigned int
// ::= J # long
// ::= K # unsigned long
// L # <none>
// ::= M # float
// ::= N # double
// ::= O # long double (__float80 is mangled differently)
// ::= _J # long long, __int64
// ::= _K # unsigned long long, __int64
// ::= _L # __int128
// ::= _M # unsigned __int128
// ::= _N # bool
// _O # <array in parameter>
// ::= _T # __float80 (Intel)
// ::= _W # wchar_t
// ::= _Z # __float80 (Digital Mars)
switch (T->getKind()) {
case BuiltinType::Void: Out << 'X'; break;
case BuiltinType::SChar: Out << 'C'; break;
case BuiltinType::Char_U: case BuiltinType::Char_S: Out << 'D'; break;
case BuiltinType::UChar: Out << 'E'; break;
case BuiltinType::Short: Out << 'F'; break;
case BuiltinType::UShort: Out << 'G'; break;
case BuiltinType::Int: Out << 'H'; break;
case BuiltinType::UInt: Out << 'I'; break;
case BuiltinType::Long: Out << 'J'; break;
case BuiltinType::ULong: Out << 'K'; break;
case BuiltinType::Float: Out << 'M'; break;
case BuiltinType::LitFloat: // HLSL Change
case BuiltinType::Double: Out << 'N'; break;
// TODO: Determine size and mangle accordingly
case BuiltinType::LongDouble: Out << 'O'; break;
case BuiltinType::LitInt: // HLSL Change
case BuiltinType::LongLong: Out << "_J"; break;
case BuiltinType::ULongLong: Out << "_K"; break;
case BuiltinType::Int128: Out << "_L"; break;
case BuiltinType::UInt128: Out << "_M"; break;
case BuiltinType::Bool: Out << "_N"; break;
case BuiltinType::Char16: Out << "_S"; break;
case BuiltinType::Char32: Out << "_U"; break;
case BuiltinType::WChar_S:
case BuiltinType::WChar_U: Out << "_W"; break;
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
case BuiltinType::Dependent:
llvm_unreachable("placeholder types shouldn't get to name mangling");
case BuiltinType::ObjCId: Out << "PAUobjc_object@@"; break;
case BuiltinType::ObjCClass: Out << "PAUobjc_class@@"; break;
case BuiltinType::ObjCSel: Out << "PAUobjc_selector@@"; break;
case BuiltinType::OCLImage1d: Out << "PAUocl_image1d@@"; break;
case BuiltinType::OCLImage1dArray: Out << "PAUocl_image1darray@@"; break;
case BuiltinType::OCLImage1dBuffer: Out << "PAUocl_image1dbuffer@@"; break;
case BuiltinType::OCLImage2d: Out << "PAUocl_image2d@@"; break;
case BuiltinType::OCLImage2dArray: Out << "PAUocl_image2darray@@"; break;
case BuiltinType::OCLImage3d: Out << "PAUocl_image3d@@"; break;
case BuiltinType::OCLSampler: Out << "PAUocl_sampler@@"; break;
case BuiltinType::OCLEvent: Out << "PAUocl_event@@"; break;
case BuiltinType::NullPtr: Out << "$$T"; break;
// HLSL Change Starts
case BuiltinType::Min10Float:
Out << "$min10f@";
break;
case BuiltinType::Min16Float:
Out << "$min16f@";
break;
case BuiltinType::Min16Int:
Out << "$min16i@";
break;
case BuiltinType::Min16UInt:
Out << "$min16ui@";
break;
case BuiltinType::HalfFloat:
Out << "$halff@";
break;
case BuiltinType::Min12Int:
Out << "$min12i@";
break;
case BuiltinType::Half:
Out << "$f16@";
break;
case BuiltinType::Int8_4Packed:
Out << "$i8_4pk@";
break;
case BuiltinType::UInt8_4Packed:
Out << "$ui8_4pk@";
break;
// HLSL Change Ends
}
}
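// Example (illustrative): 'void f(bool, wchar_t)' combines the extended codes
// above into "?f@@YAX_N_W@Z". The "$..." spellings for the HLSL
// minimum-precision and packed types are DXC inventions, so standard
// demanglers will not recognize them.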
// <type> ::= <function-type>
void MicrosoftCXXNameMangler::mangleType(const FunctionProtoType *T, Qualifiers,
SourceRange) {
// Structors only appear in decls, so at this point we know it's not a
// structor type.
// FIXME: This may not be lambda-friendly.
if (T->getTypeQuals() || T->getRefQualifier() != RQ_None) {
Out << "$$A8@@";
mangleFunctionType(T, /*D=*/nullptr, /*ForceThisQuals=*/true);
} else {
Out << "$$A6";
mangleFunctionType(T);
}
}
void MicrosoftCXXNameMangler::mangleType(const FunctionNoProtoType *T,
Qualifiers, SourceRange) {
llvm_unreachable("Can't mangle K&R function prototypes");
}
void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T,
const FunctionDecl *D,
bool ForceThisQuals) {
// <function-type> ::= <this-cvr-qualifiers> <calling-convention>
// <return-type> <argument-list> <throw-spec>
const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
SourceRange Range;
if (D) Range = D->getSourceRange();
bool IsStructor = false, HasThisQuals = ForceThisQuals, IsCtorClosure = false;
CallingConv CC = T->getCallConv();
if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(D)) {
if (MD->isInstance())
HasThisQuals = true;
if (isa<CXXDestructorDecl>(MD)) {
IsStructor = true;
} else if (isa<CXXConstructorDecl>(MD)) {
IsStructor = true;
IsCtorClosure = (StructorType == Ctor_CopyingClosure ||
StructorType == Ctor_DefaultClosure) &&
getStructor(MD) == Structor;
if (IsCtorClosure)
CC = getASTContext().getDefaultCallingConvention(
/*IsVariadic=*/false, /*IsCXXMethod=*/true);
}
}
// If this is a C++ instance method, mangle the CVR qualifiers for the
// this pointer.
if (HasThisQuals) {
Qualifiers Quals = Qualifiers::fromCVRMask(Proto->getTypeQuals());
manglePointerExtQualifiers(Quals, /*PointeeType=*/QualType());
mangleRefQualifier(Proto->getRefQualifier());
mangleQualifiers(Quals, /*IsMember=*/false);
}
mangleCallingConvention(CC);
// <return-type> ::= <type>
// ::= @ # structors (they have no declared return type)
if (IsStructor) {
if (isa<CXXDestructorDecl>(D) && D == Structor &&
StructorType == Dtor_Deleting) {
// The scalar deleting destructor takes an extra int argument.
// However, the FunctionType generated has 0 arguments.
// FIXME: This is a temporary hack.
// Maybe should fix the FunctionType creation instead?
Out << (PointersAre64Bit ? "PEAXI@Z" : "PAXI@Z");
return;
}
if (IsCtorClosure) {
// Default constructor closure and copy constructor closure both return
// void.
Out << 'X';
if (StructorType == Ctor_DefaultClosure) {
// Default constructor closure always has no arguments.
Out << 'X';
} else if (StructorType == Ctor_CopyingClosure) {
// Copy constructor closure always takes an unqualified reference.
mangleArgumentType(getASTContext().getLValueReferenceType(
Proto->getParamType(0)
->getAs<LValueReferenceType>()
->getPointeeType(),
/*SpelledAsLValue=*/true),
Range);
Out << '@';
} else {
llvm_unreachable("unexpected constructor closure!");
}
Out << 'Z';
return;
}
Out << '@';
} else {
QualType ResultType = Proto->getReturnType();
if (const auto *AT =
dyn_cast_or_null<AutoType>(ResultType->getContainedAutoType())) {
Out << '?';
mangleQualifiers(ResultType.getLocalQualifiers(), /*IsMember=*/false);
Out << '?';
mangleSourceName(AT->isDecltypeAuto() ? "<decltype-auto>" : "<auto>");
Out << '@';
} else {
if (ResultType->isVoidType())
ResultType = ResultType.getUnqualifiedType();
mangleType(ResultType, Range, QMM_Result);
}
}
// <argument-list> ::= X # void
// ::= <type>+ @
// ::= <type>* Z # varargs
if (Proto->getNumParams() == 0 && !Proto->isVariadic()) {
Out << 'X';
} else {
// Happens for function pointer type arguments for example.
for (const QualType &Arg : Proto->param_types())
mangleArgumentType(Arg, Range);
// <builtin-type> ::= Z # ellipsis
if (Proto->isVariadic())
Out << 'Z';
else
Out << '@';
}
mangleThrowSpecification(Proto);
}
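// Example (illustrative): for a global 'int f(double)' this emits 'A'
// (__cdecl), 'H' (int return), 'N' (double), '@' (end of parameters) and
// 'Z' (throw spec); with the 'Y' added by mangleFunctionClass the full name
// is "?f@@YAHN@Z".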
void MicrosoftCXXNameMangler::mangleFunctionClass(const FunctionDecl *FD) {
  // <function-class>  ::= <member-function> E? # E designates a 64-bit 'this'
  //                                            # pointer. In 64-bit mode *all*
  //                                            # 'this' pointers are 64-bit.
// ::= <global-function>
// <member-function> ::= A # private: near
// ::= B # private: far
// ::= C # private: static near
// ::= D # private: static far
// ::= E # private: virtual near
// ::= F # private: virtual far
// ::= I # protected: near
// ::= J # protected: far
// ::= K # protected: static near
// ::= L # protected: static far
// ::= M # protected: virtual near
// ::= N # protected: virtual far
// ::= Q # public: near
// ::= R # public: far
// ::= S # public: static near
// ::= T # public: static far
// ::= U # public: virtual near
// ::= V # public: virtual far
// <global-function> ::= Y # global near
// ::= Z # global far
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
switch (MD->getAccess()) {
case AS_none:
llvm_unreachable("Unsupported access specifier");
case AS_private:
if (MD->isStatic())
Out << 'C';
else if (MD->isVirtual())
Out << 'E';
else
Out << 'A';
break;
case AS_protected:
if (MD->isStatic())
Out << 'K';
else if (MD->isVirtual())
Out << 'M';
else
Out << 'I';
break;
case AS_public:
if (MD->isStatic())
Out << 'S';
else if (MD->isVirtual())
Out << 'U';
else
Out << 'Q';
}
} else {
Out << 'Y';
}
}
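// Example (illustrative): 'struct C { static void f(); };' selects 'S'
// (public static near) here, producing "?f@C@@SAXXZ"; a plain global
// function selects 'Y', as in "?f@@YAXXZ".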
void MicrosoftCXXNameMangler::mangleCallingConvention(CallingConv CC) {
// <calling-convention> ::= A # __cdecl
// ::= B # __export __cdecl
// ::= C # __pascal
// ::= D # __export __pascal
// ::= E # __thiscall
// ::= F # __export __thiscall
// ::= G # __stdcall
// ::= H # __export __stdcall
// ::= I # __fastcall
// ::= J # __export __fastcall
// ::= Q # __vectorcall
// The 'export' calling conventions are from a bygone era
// (*cough*Win16*cough*) when functions were declared for export with
// that keyword. (It didn't actually export them, it just made them so
// that they could be in a DLL and somebody from another module could call
// them.)
switch (CC) {
default:
llvm_unreachable("Unsupported CC for mangling");
case CC_X86_64Win64:
case CC_X86_64SysV:
case CC_C: Out << 'A'; break;
case CC_X86Pascal: Out << 'C'; break;
case CC_X86ThisCall: Out << 'E'; break;
case CC_X86StdCall: Out << 'G'; break;
case CC_X86FastCall: Out << 'I'; break;
case CC_X86VectorCall: Out << 'Q'; break;
}
}
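// Example (illustrative): on x86, 'void __stdcall f();' mangles as
// "?f@@YGXXZ" and 'void __fastcall f();' as "?f@@YIXXZ"; on x86-64 the
// explicit conventions collapse to __cdecl and mangle as 'A'.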
void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T) {
mangleCallingConvention(T->getCallConv());
}
void MicrosoftCXXNameMangler::mangleThrowSpecification(
const FunctionProtoType *FT) {
// <throw-spec> ::= Z # throw(...) (default)
// ::= @ # throw() or __declspec/__attribute__((nothrow))
// ::= <type>+
// NOTE: Since the Microsoft compiler ignores throw specifications, they are
// all actually mangled as 'Z'. (They're ignored because their associated
// functionality isn't implemented, and probably never will be.)
Out << 'Z';
}
void MicrosoftCXXNameMangler::mangleType(const UnresolvedUsingType *T,
Qualifiers, SourceRange Range) {
// Probably should be mangled as a template instantiation; need to see what
// VC does first.
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"cannot mangle this unresolved dependent type yet");
Diags.Report(Range.getBegin(), DiagID)
<< Range;
}
// <type> ::= <union-type> | <struct-type> | <class-type> | <enum-type>
// <union-type> ::= T <name>
// <struct-type> ::= U <name>
// <class-type> ::= V <name>
// <enum-type> ::= W4 <name>
void MicrosoftCXXNameMangler::mangleType(const EnumType *T, Qualifiers,
SourceRange) {
mangleType(cast<TagType>(T)->getDecl());
}
void MicrosoftCXXNameMangler::mangleType(const RecordType *T, Qualifiers,
SourceRange) {
mangleType(cast<TagType>(T)->getDecl());
}
void MicrosoftCXXNameMangler::mangleType(const TagDecl *TD) {
switch (TD->getTagKind()) {
case TTK_Union:
Out << 'T';
break;
case TTK_Struct:
case TTK_Interface:
Out << 'U';
break;
case TTK_Class:
Out << 'V';
break;
case TTK_Enum:
Out << "W4";
break;
}
mangleName(TD);
}
// <type> ::= <array-type>
// <array-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers>
// [Y <dimension-count> <dimension>+]
// <element-type> # as global, E is never required
// It's supposed to be the other way around, but for some strange reason, it
// isn't. Today this behavior is retained for the sole purpose of backwards
// compatibility.
void MicrosoftCXXNameMangler::mangleDecayedArrayType(const ArrayType *T) {
// This isn't a recursive mangling, so now we have to do it all in this
// one call.
manglePointerCVQualifiers(T->getElementType().getQualifiers());
mangleType(T->getElementType(), SourceRange());
}
void MicrosoftCXXNameMangler::mangleType(const ConstantArrayType *T, Qualifiers,
SourceRange) {
llvm_unreachable("Should have been special cased");
}
void MicrosoftCXXNameMangler::mangleType(const VariableArrayType *T, Qualifiers,
SourceRange) {
llvm_unreachable("Should have been special cased");
}
void MicrosoftCXXNameMangler::mangleType(const DependentSizedArrayType *T,
Qualifiers, SourceRange) {
llvm_unreachable("Should have been special cased");
}
void MicrosoftCXXNameMangler::mangleType(const IncompleteArrayType *T,
Qualifiers, SourceRange) {
llvm_unreachable("Should have been special cased");
}
void MicrosoftCXXNameMangler::mangleArrayType(const ArrayType *T) {
QualType ElementTy(T, 0);
SmallVector<llvm::APInt, 3> Dimensions;
for (;;) {
if (ElementTy->isConstantArrayType()) {
const ConstantArrayType *CAT =
getASTContext().getAsConstantArrayType(ElementTy);
Dimensions.push_back(CAT->getSize());
ElementTy = CAT->getElementType();
} else if (ElementTy->isIncompleteArrayType()) {
const IncompleteArrayType *IAT =
getASTContext().getAsIncompleteArrayType(ElementTy);
Dimensions.push_back(llvm::APInt(32, 0));
ElementTy = IAT->getElementType();
} else if (ElementTy->isVariableArrayType()) {
const VariableArrayType *VAT =
getASTContext().getAsVariableArrayType(ElementTy);
Dimensions.push_back(llvm::APInt(32, 0));
ElementTy = VAT->getElementType();
} else if (ElementTy->isDependentSizedArrayType()) {
// The dependent expression has to be folded into a constant (TODO).
const DependentSizedArrayType *DSAT =
getASTContext().getAsDependentSizedArrayType(ElementTy);
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"cannot mangle this dependent-length array yet");
Diags.Report(DSAT->getSizeExpr()->getExprLoc(), DiagID)
<< DSAT->getBracketsRange();
return;
} else {
break;
}
}
Out << 'Y';
// <dimension-count> ::= <number> # number of extra dimensions
mangleNumber(Dimensions.size());
for (const llvm::APInt &Dimension : Dimensions)
mangleNumber(Dimension.getLimitedValue());
mangleType(ElementTy, SourceRange(), QMM_Escape);
}
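// Worked example (illustrative, and assuming the <number> scheme in which
// values 1..10 encode as the digits '0'..'9'): 'int [2][3]' collects the
// dimensions {2, 3} and emits "Y" "1" (two dimensions) "1" (extent 2)
// "2" (extent 3) "H", i.e. "Y112H".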
// <type> ::= <pointer-to-member-type>
// <pointer-to-member-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers>
// <class name> <type>
void MicrosoftCXXNameMangler::mangleType(const MemberPointerType *T, Qualifiers Quals,
SourceRange Range) {
QualType PointeeType = T->getPointeeType();
manglePointerCVQualifiers(Quals);
manglePointerExtQualifiers(Quals, PointeeType);
if (const FunctionProtoType *FPT = PointeeType->getAs<FunctionProtoType>()) {
Out << '8';
mangleName(T->getClass()->castAs<RecordType>()->getDecl());
mangleFunctionType(FPT, nullptr, true);
} else {
mangleQualifiers(PointeeType.getQualifiers(), true);
mangleName(T->getClass()->castAs<RecordType>()->getDecl());
mangleType(PointeeType, Range, QMM_Drop);
}
}
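// Example (illustrative, unverified against MSVC output): a data member
// pointer parameter 'int C::*' would take the non-function branch and mangle
// as "PQC@@H" on x86, while pointers to member functions go through the '8'
// path above.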
void MicrosoftCXXNameMangler::mangleType(const TemplateTypeParmType *T,
Qualifiers, SourceRange Range) {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"cannot mangle this template type parameter type yet");
Diags.Report(Range.getBegin(), DiagID)
<< Range;
}
void MicrosoftCXXNameMangler::mangleType(const SubstTemplateTypeParmPackType *T,
Qualifiers, SourceRange Range) {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"cannot mangle this substituted parameter pack yet");
Diags.Report(Range.getBegin(), DiagID)
<< Range;
}
// <type> ::= <pointer-type>
// <pointer-type> ::= E? <pointer-cvr-qualifiers> <cvr-qualifiers> <type>
// # the E is required for 64-bit non-static pointers
void MicrosoftCXXNameMangler::mangleType(const PointerType *T, Qualifiers Quals,
SourceRange Range) {
QualType PointeeType = T->getPointeeType();
manglePointerCVQualifiers(Quals);
manglePointerExtQualifiers(Quals, PointeeType);
mangleType(PointeeType, Range);
}
void MicrosoftCXXNameMangler::mangleType(const ObjCObjectPointerType *T,
Qualifiers Quals, SourceRange Range) {
QualType PointeeType = T->getPointeeType();
manglePointerCVQualifiers(Quals);
manglePointerExtQualifiers(Quals, PointeeType);
// Object pointers never have qualifiers.
Out << 'A';
mangleType(PointeeType, Range);
}
// <type> ::= <reference-type>
// <reference-type> ::= A E? <cvr-qualifiers> <type>
// # the E is required for 64-bit non-static lvalue references
void MicrosoftCXXNameMangler::mangleType(const LValueReferenceType *T,
Qualifiers Quals, SourceRange Range) {
QualType PointeeType = T->getPointeeType();
Out << (Quals.hasVolatile() ? 'B' : 'A');
manglePointerExtQualifiers(Quals, PointeeType);
mangleType(PointeeType, Range);
}
// <type> ::= <r-value-reference-type>
// <r-value-reference-type> ::= $$Q E? <cvr-qualifiers> <type>
// # the E is required for 64-bit non-static rvalue references
void MicrosoftCXXNameMangler::mangleType(const RValueReferenceType *T,
Qualifiers Quals, SourceRange Range) {
QualType PointeeType = T->getPointeeType();
Out << (Quals.hasVolatile() ? "$$R" : "$$Q");
manglePointerExtQualifiers(Quals, PointeeType);
mangleType(PointeeType, Range);
}
void MicrosoftCXXNameMangler::mangleType(const ComplexType *T, Qualifiers,
SourceRange Range) {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"cannot mangle this complex number type yet");
Diags.Report(Range.getBegin(), DiagID)
<< Range;
}
void MicrosoftCXXNameMangler::mangleType(const VectorType *T, Qualifiers Quals,
SourceRange Range) {
const BuiltinType *ET = T->getElementType()->getAs<BuiltinType>();
assert(ET && "vectors with non-builtin elements are unsupported");
uint64_t Width = getASTContext().getTypeSize(T);
// Pattern match exactly the typedefs in our intrinsic headers. Anything that
// doesn't match the Intel types uses a custom mangling below.
bool IsBuiltin = true;
llvm::Triple::ArchType AT =
getASTContext().getTargetInfo().getTriple().getArch();
if (AT == llvm::Triple::x86 || AT == llvm::Triple::x86_64) {
if (Width == 64 && ET->getKind() == BuiltinType::LongLong) {
Out << "T__m64";
} else if (Width >= 128) {
if (ET->getKind() == BuiltinType::Float)
Out << "T__m" << Width;
else if (ET->getKind() == BuiltinType::LongLong)
Out << "T__m" << Width << 'i';
else if (ET->getKind() == BuiltinType::Double)
Out << "U__m" << Width << 'd';
else
IsBuiltin = false;
} else {
IsBuiltin = false;
}
} else {
IsBuiltin = false;
}
if (!IsBuiltin) {
// The MS ABI doesn't have a special mangling for vector types, so we define
// our own mangling to handle uses of __vector_size__ on user-specified
// types, and for extensions like __v4sf.
Out << "T__clang_vec" << T->getNumElements() << '_';
mangleType(ET, Quals, Range);
}
Out << "@@";
}
void MicrosoftCXXNameMangler::mangleType(const ExtVectorType *T, Qualifiers,
SourceRange Range) {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"cannot mangle this extended vector type yet");
Diags.Report(Range.getBegin(), DiagID)
<< Range;
}
void MicrosoftCXXNameMangler::mangleType(const DependentSizedExtVectorType *T,
Qualifiers, SourceRange Range) {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"cannot mangle this dependent-sized extended vector type yet");
Diags.Report(Range.getBegin(), DiagID)
<< Range;
}
void MicrosoftCXXNameMangler::mangleType(const ObjCInterfaceType *T, Qualifiers,
SourceRange) {
// ObjC interfaces have structs underlying them.
Out << 'U';
mangleName(T->getDecl());
}
void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T, Qualifiers,
SourceRange Range) {
// We don't allow overloading by different protocol qualification,
// so mangling them isn't necessary.
mangleType(T->getBaseType(), Range);
}
void MicrosoftCXXNameMangler::mangleType(const BlockPointerType *T,
Qualifiers Quals, SourceRange Range) {
QualType PointeeType = T->getPointeeType();
manglePointerCVQualifiers(Quals);
manglePointerExtQualifiers(Quals, PointeeType);
Out << "_E";
mangleFunctionType(PointeeType->castAs<FunctionProtoType>());
}
void MicrosoftCXXNameMangler::mangleType(const InjectedClassNameType *,
Qualifiers, SourceRange) {
llvm_unreachable("Cannot mangle injected class name type.");
}
void MicrosoftCXXNameMangler::mangleType(const TemplateSpecializationType *T,
Qualifiers, SourceRange Range) {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"cannot mangle this template specialization type yet");
Diags.Report(Range.getBegin(), DiagID)
<< Range;
}
void MicrosoftCXXNameMangler::mangleType(const DependentNameType *T, Qualifiers,
SourceRange Range) {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"cannot mangle this dependent name type yet");
Diags.Report(Range.getBegin(), DiagID)
<< Range;
}
void MicrosoftCXXNameMangler::mangleType(
const DependentTemplateSpecializationType *T, Qualifiers,
SourceRange Range) {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"cannot mangle this dependent template specialization type yet");
Diags.Report(Range.getBegin(), DiagID)
<< Range;
}
void MicrosoftCXXNameMangler::mangleType(const PackExpansionType *T, Qualifiers,
SourceRange Range) {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"cannot mangle this pack expansion yet");
Diags.Report(Range.getBegin(), DiagID)
<< Range;
}
void MicrosoftCXXNameMangler::mangleType(const TypeOfType *T, Qualifiers,
SourceRange Range) {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"cannot mangle this typeof(type) yet");
Diags.Report(Range.getBegin(), DiagID)
<< Range;
}
void MicrosoftCXXNameMangler::mangleType(const TypeOfExprType *T, Qualifiers,
SourceRange Range) {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"cannot mangle this typeof(expression) yet");
Diags.Report(Range.getBegin(), DiagID)
<< Range;
}
void MicrosoftCXXNameMangler::mangleType(const DecltypeType *T, Qualifiers,
SourceRange Range) {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"cannot mangle this decltype() yet");
Diags.Report(Range.getBegin(), DiagID)
<< Range;
}
void MicrosoftCXXNameMangler::mangleType(const UnaryTransformType *T,
Qualifiers, SourceRange Range) {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"cannot mangle this unary transform type yet");
Diags.Report(Range.getBegin(), DiagID)
<< Range;
}
void MicrosoftCXXNameMangler::mangleType(const AutoType *T, Qualifiers,
SourceRange Range) {
assert(T->getDeducedType().isNull() && "expecting a dependent type!");
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"cannot mangle this 'auto' type yet");
Diags.Report(Range.getBegin(), DiagID)
<< Range;
}
void MicrosoftCXXNameMangler::mangleType(const AtomicType *T, Qualifiers,
SourceRange Range) {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"cannot mangle this C11 atomic type yet");
Diags.Report(Range.getBegin(), DiagID)
<< Range;
}
void MicrosoftMangleContextImpl::mangleCXXName(const NamedDecl *D,
raw_ostream &Out) {
assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) &&
"Invalid mangleName() call, argument is not a variable or function!");
assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) &&
"Invalid mangleName() call on 'structor decl!");
PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
getASTContext().getSourceManager(),
"Mangling declaration");
MicrosoftCXXNameMangler Mangler(*this, Out);
return Mangler.mangle(D);
}
// <this-adjustment> ::= <no-adjustment> | <static-adjustment> |
// <virtual-adjustment>
// <no-adjustment> ::= A # private near
// ::= B # private far
// ::= I # protected near
// ::= J # protected far
// ::= Q # public near
// ::= R # public far
// <static-adjustment> ::= G <static-offset> # private near
// ::= H <static-offset> # private far
// ::= O <static-offset> # protected near
// ::= P <static-offset> # protected far
// ::= W <static-offset> # public near
// ::= X <static-offset> # public far
// <virtual-adjustment> ::= $0 <virtual-shift> <static-offset> # private near
// ::= $1 <virtual-shift> <static-offset> # private far
// ::= $2 <virtual-shift> <static-offset> # protected near
// ::= $3 <virtual-shift> <static-offset> # protected far
// ::= $4 <virtual-shift> <static-offset> # public near
// ::= $5 <virtual-shift> <static-offset> # public far
// <virtual-shift> ::= <vtordisp-shift> | <vtordispex-shift>
// <vtordisp-shift> ::= <offset-to-vtordisp>
// <vtordispex-shift> ::= <offset-to-vbptr> <vbase-offset-offset>
// <offset-to-vtordisp>
static void mangleThunkThisAdjustment(const CXXMethodDecl *MD,
const ThisAdjustment &Adjustment,
MicrosoftCXXNameMangler &Mangler,
raw_ostream &Out) {
if (!Adjustment.Virtual.isEmpty()) {
Out << '$';
char AccessSpec;
switch (MD->getAccess()) {
case AS_none:
llvm_unreachable("Unsupported access specifier");
case AS_private:
AccessSpec = '0';
break;
case AS_protected:
AccessSpec = '2';
break;
case AS_public:
AccessSpec = '4';
}
if (Adjustment.Virtual.Microsoft.VBPtrOffset) {
Out << 'R' << AccessSpec;
Mangler.mangleNumber(
static_cast<uint32_t>(Adjustment.Virtual.Microsoft.VBPtrOffset));
Mangler.mangleNumber(
static_cast<uint32_t>(Adjustment.Virtual.Microsoft.VBOffsetOffset));
Mangler.mangleNumber(
static_cast<uint32_t>(Adjustment.Virtual.Microsoft.VtordispOffset));
Mangler.mangleNumber(static_cast<uint32_t>(Adjustment.NonVirtual));
} else {
Out << AccessSpec;
Mangler.mangleNumber(
static_cast<uint32_t>(Adjustment.Virtual.Microsoft.VtordispOffset));
Mangler.mangleNumber(-static_cast<uint32_t>(Adjustment.NonVirtual));
}
} else if (Adjustment.NonVirtual != 0) {
switch (MD->getAccess()) {
case AS_none:
llvm_unreachable("Unsupported access specifier");
case AS_private:
Out << 'G';
break;
case AS_protected:
Out << 'O';
break;
case AS_public:
Out << 'W';
}
Mangler.mangleNumber(-static_cast<uint32_t>(Adjustment.NonVirtual));
} else {
switch (MD->getAccess()) {
case AS_none:
llvm_unreachable("Unsupported access specifier");
case AS_private:
Out << 'A';
break;
case AS_protected:
Out << 'I';
break;
case AS_public:
Out << 'Q';
}
}
}
void
MicrosoftMangleContextImpl::mangleVirtualMemPtrThunk(const CXXMethodDecl *MD,
raw_ostream &Out) {
MicrosoftVTableContext *VTContext =
cast<MicrosoftVTableContext>(getASTContext().getVTableContext());
const MicrosoftVTableContext::MethodVFTableLocation &ML =
VTContext->getMethodVFTableLocation(GlobalDecl(MD));
MicrosoftCXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "\01?";
Mangler.mangleVirtualMemPtrThunk(MD, ML);
}
void MicrosoftMangleContextImpl::mangleThunk(const CXXMethodDecl *MD,
const ThunkInfo &Thunk,
raw_ostream &Out) {
MicrosoftCXXNameMangler Mangler(*this, Out);
Out << "\01?";
Mangler.mangleName(MD);
mangleThunkThisAdjustment(MD, Thunk.This, Mangler, Out);
if (!Thunk.Return.isEmpty())
assert(Thunk.Method != nullptr &&
"Thunk info should hold the overridee decl");
const CXXMethodDecl *DeclForFPT = Thunk.Method ? Thunk.Method : MD;
Mangler.mangleFunctionType(
DeclForFPT->getType()->castAs<FunctionProtoType>(), MD);
}
void MicrosoftMangleContextImpl::mangleCXXDtorThunk(
const CXXDestructorDecl *DD, CXXDtorType Type,
const ThisAdjustment &Adjustment, raw_ostream &Out) {
// FIXME: Actually, the dtor thunk should be emitted for vector deleting
// dtors rather than scalar deleting dtors. Just use the vector deleting dtor
// mangling manually until we support both deleting dtor types.
assert(Type == Dtor_Deleting);
MicrosoftCXXNameMangler Mangler(*this, Out, DD, Type);
Out << "\01??_E";
Mangler.mangleName(DD->getParent());
mangleThunkThisAdjustment(DD, Adjustment, Mangler, Out);
Mangler.mangleFunctionType(DD->getType()->castAs<FunctionProtoType>(), DD);
}
void MicrosoftMangleContextImpl::mangleCXXVFTable(
const CXXRecordDecl *Derived, ArrayRef<const CXXRecordDecl *> BasePath,
raw_ostream &Out) {
// <mangled-name> ::= ?_7 <class-name> <storage-class>
// <cvr-qualifiers> [<name>] @
// NOTE: <cvr-qualifiers> here is always 'B' (const). <storage-class>
// is always '6' for vftables.
MicrosoftCXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "\01??_7";
Mangler.mangleName(Derived);
Mangler.getStream() << "6B"; // '6' for vftable, 'B' for const.
for (const CXXRecordDecl *RD : BasePath)
Mangler.mangleName(RD);
Mangler.getStream() << '@';
}
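// Example (illustrative): the primary vftable of 'class C' with an empty
// base path is "??_7C@@6B@"; the "\01" prefix merely tells LLVM not to
// apply any further mangling of its own.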
void MicrosoftMangleContextImpl::mangleCXXVBTable(
const CXXRecordDecl *Derived, ArrayRef<const CXXRecordDecl *> BasePath,
raw_ostream &Out) {
// <mangled-name> ::= ?_8 <class-name> <storage-class>
// <cvr-qualifiers> [<name>] @
// NOTE: <cvr-qualifiers> here is always 'B' (const). <storage-class>
// is always '7' for vbtables.
MicrosoftCXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "\01??_8";
Mangler.mangleName(Derived);
Mangler.getStream() << "7B"; // '7' for vbtable, 'B' for const.
for (const CXXRecordDecl *RD : BasePath)
Mangler.mangleName(RD);
Mangler.getStream() << '@';
}
void MicrosoftMangleContextImpl::mangleCXXRTTI(QualType T, raw_ostream &Out) {
MicrosoftCXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "\01??_R0";
Mangler.mangleType(T, SourceRange(), MicrosoftCXXNameMangler::QMM_Result);
Mangler.getStream() << "@8";
}
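// Example (illustrative): the type descriptor for 'class C' comes out as
// "??_R0?AVC@@@8", where QMM_Result contributes the "?A" qualifier block
// ahead of the 'V' tag mangling.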
void MicrosoftMangleContextImpl::mangleCXXRTTIName(QualType T,
raw_ostream &Out) {
MicrosoftCXXNameMangler Mangler(*this, Out);
Mangler.getStream() << '.';
Mangler.mangleType(T, SourceRange(), MicrosoftCXXNameMangler::QMM_Result);
}
void MicrosoftMangleContextImpl::mangleCXXCatchHandlerType(QualType T,
uint32_t Flags,
raw_ostream &Out) {
MicrosoftCXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "llvm.eh.handlertype.";
Mangler.mangleType(T, SourceRange(), MicrosoftCXXNameMangler::QMM_Result);
Mangler.getStream() << '.' << Flags;
}
void MicrosoftMangleContextImpl::mangleCXXVirtualDisplacementMap(
const CXXRecordDecl *SrcRD, const CXXRecordDecl *DstRD, raw_ostream &Out) {
MicrosoftCXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "\01??_K";
Mangler.mangleName(SrcRD);
Mangler.getStream() << "$C";
Mangler.mangleName(DstRD);
}
void MicrosoftMangleContextImpl::mangleCXXThrowInfo(QualType T,
bool IsConst,
bool IsVolatile,
uint32_t NumEntries,
raw_ostream &Out) {
MicrosoftCXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "_TI";
if (IsConst)
Mangler.getStream() << 'C';
if (IsVolatile)
Mangler.getStream() << 'V';
Mangler.getStream() << NumEntries;
Mangler.mangleType(T, SourceRange(), MicrosoftCXXNameMangler::QMM_Result);
}
void MicrosoftMangleContextImpl::mangleCXXCatchableTypeArray(
QualType T, uint32_t NumEntries, raw_ostream &Out) {
MicrosoftCXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "_CTA";
Mangler.getStream() << NumEntries;
Mangler.mangleType(T, SourceRange(), MicrosoftCXXNameMangler::QMM_Result);
}
void MicrosoftMangleContextImpl::mangleCXXCatchableType(
QualType T, const CXXConstructorDecl *CD, CXXCtorType CT, uint32_t Size,
uint32_t NVOffset, int32_t VBPtrOffset, uint32_t VBIndex,
raw_ostream &Out) {
MicrosoftCXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "_CT";
llvm::SmallString<64> RTTIMangling;
{
llvm::raw_svector_ostream Stream(RTTIMangling);
mangleCXXRTTI(T, Stream);
}
Mangler.getStream() << RTTIMangling.substr(1);
  // VS2015 CTP6 omits the copy-constructor in the mangled name. This name is,
  // in fact, superfluous, but I'm not sure the change was made consciously.
  // TODO: Revisit this when VS2015 gets released.
llvm::SmallString<64> CopyCtorMangling;
if (CD) {
llvm::raw_svector_ostream Stream(CopyCtorMangling);
mangleCXXCtor(CD, CT, Stream);
}
Mangler.getStream() << CopyCtorMangling.substr(1);
Mangler.getStream() << Size;
if (VBPtrOffset == -1) {
if (NVOffset) {
Mangler.getStream() << NVOffset;
}
} else {
Mangler.getStream() << NVOffset;
Mangler.getStream() << VBPtrOffset;
Mangler.getStream() << VBIndex;
}
}
void MicrosoftMangleContextImpl::mangleCXXRTTIBaseClassDescriptor(
const CXXRecordDecl *Derived, uint32_t NVOffset, int32_t VBPtrOffset,
uint32_t VBTableOffset, uint32_t Flags, raw_ostream &Out) {
MicrosoftCXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "\01??_R1";
Mangler.mangleNumber(NVOffset);
Mangler.mangleNumber(VBPtrOffset);
Mangler.mangleNumber(VBTableOffset);
Mangler.mangleNumber(Flags);
Mangler.mangleName(Derived);
Mangler.getStream() << "8";
}
void MicrosoftMangleContextImpl::mangleCXXRTTIBaseClassArray(
const CXXRecordDecl *Derived, raw_ostream &Out) {
MicrosoftCXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "\01??_R2";
Mangler.mangleName(Derived);
Mangler.getStream() << "8";
}
void MicrosoftMangleContextImpl::mangleCXXRTTIClassHierarchyDescriptor(
const CXXRecordDecl *Derived, raw_ostream &Out) {
MicrosoftCXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "\01??_R3";
Mangler.mangleName(Derived);
Mangler.getStream() << "8";
}
void MicrosoftMangleContextImpl::mangleCXXRTTICompleteObjectLocator(
const CXXRecordDecl *Derived, ArrayRef<const CXXRecordDecl *> BasePath,
raw_ostream &Out) {
// <mangled-name> ::= ?_R4 <class-name> <storage-class>
// <cvr-qualifiers> [<name>] @
// NOTE: <cvr-qualifiers> here is always 'B' (const). <storage-class>
// is always '6' for vftables.
MicrosoftCXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "\01??_R4";
Mangler.mangleName(Derived);
Mangler.getStream() << "6B"; // '6' for vftable, 'B' for const.
for (const CXXRecordDecl *RD : BasePath)
Mangler.mangleName(RD);
Mangler.getStream() << '@';
}
void MicrosoftMangleContextImpl::mangleSEHFilterExpression(
const NamedDecl *EnclosingDecl, raw_ostream &Out) {
MicrosoftCXXNameMangler Mangler(*this, Out);
// The function body is in the same comdat as the function with the handler,
// so the numbering here doesn't have to be the same across TUs.
//
// <mangled-name> ::= ?filt$ <filter-number> @0
Mangler.getStream() << "\01?filt$" << SEHFilterIds[EnclosingDecl]++ << "@0@";
Mangler.mangleName(EnclosingDecl);
}
void MicrosoftMangleContextImpl::mangleSEHFinallyBlock(
const NamedDecl *EnclosingDecl, raw_ostream &Out) {
MicrosoftCXXNameMangler Mangler(*this, Out);
// The function body is in the same comdat as the function with the handler,
// so the numbering here doesn't have to be the same across TUs.
//
// <mangled-name> ::= ?fin$ <filter-number> @0
Mangler.getStream() << "\01?fin$" << SEHFinallyIds[EnclosingDecl]++ << "@0@";
Mangler.mangleName(EnclosingDecl);
}
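// Example (illustrative): the first filter and the first finally funclet
// outlined from 'main' would be named "?filt$0@0@main@@" and
// "?fin$0@0@main@@" respectively.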
void MicrosoftMangleContextImpl::mangleTypeName(QualType T, raw_ostream &Out) {
  // This is just a made-up unique string for the purposes of TBAA. undname
  // does *not* know how to demangle it.
MicrosoftCXXNameMangler Mangler(*this, Out);
Mangler.getStream() << '?';
Mangler.mangleType(T, SourceRange());
}
void MicrosoftMangleContextImpl::mangleCXXCtor(const CXXConstructorDecl *D,
CXXCtorType Type,
raw_ostream &Out) {
MicrosoftCXXNameMangler mangler(*this, Out, D, Type);
mangler.mangle(D);
}
void MicrosoftMangleContextImpl::mangleCXXDtor(const CXXDestructorDecl *D,
CXXDtorType Type,
raw_ostream &Out) {
MicrosoftCXXNameMangler mangler(*this, Out, D, Type);
mangler.mangle(D);
}
void MicrosoftMangleContextImpl::mangleReferenceTemporary(const VarDecl *VD,
unsigned,
raw_ostream &) {
unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
"cannot mangle this reference temporary yet");
getDiags().Report(VD->getLocation(), DiagID);
}
void MicrosoftMangleContextImpl::mangleThreadSafeStaticGuardVariable(
const VarDecl *VD, unsigned GuardNum, raw_ostream &Out) {
MicrosoftCXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "\01?$TSS" << GuardNum << '@';
Mangler.mangleNestedName(VD);
}
void MicrosoftMangleContextImpl::mangleStaticGuardVariable(const VarDecl *VD,
raw_ostream &Out) {
// <guard-name> ::= ?_B <postfix> @5 <scope-depth>
// ::= ?__J <postfix> @5 <scope-depth>
// ::= ?$S <guard-num> @ <postfix> @4IA
// The first mangling is what MSVC uses to guard static locals in inline
// functions. It uses a different mangling in external functions to support
// guarding more than 32 variables. MSVC rejects inline functions with more
// than 32 static locals. We don't fully implement the second mangling
// because those guards are not externally visible, and instead use LLVM's
// default renaming when creating a new guard variable.
MicrosoftCXXNameMangler Mangler(*this, Out);
bool Visible = VD->isExternallyVisible();
if (Visible) {
Mangler.getStream() << (VD->getTLSKind() ? "\01??__J" : "\01??_B");
} else {
Mangler.getStream() << "\01?$S1@";
}
unsigned ScopeDepth = 0;
if (Visible && !getNextDiscriminator(VD, ScopeDepth))
// If we do not have a discriminator and are emitting a guard variable for
// use at global scope, then mangling the nested name will not be enough to
// remove ambiguities.
Mangler.mangle(VD, "");
else
Mangler.mangleNestedName(VD);
Mangler.getStream() << (Visible ? "@5" : "@4IA");
if (ScopeDepth)
Mangler.mangleNumber(ScopeDepth);
}
void MicrosoftMangleContextImpl::mangleInitFiniStub(const VarDecl *D,
raw_ostream &Out,
char CharCode) {
MicrosoftCXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "\01??__" << CharCode;
Mangler.mangleName(D);
if (D->isStaticDataMember()) {
Mangler.mangleVariableEncoding(D);
Mangler.getStream() << '@';
}
// This is the function class mangling. These stubs are global, non-variadic,
// cdecl functions that return void and take no args.
Mangler.getStream() << "YAXXZ";
}
void MicrosoftMangleContextImpl::mangleDynamicInitializer(const VarDecl *D,
raw_ostream &Out) {
// <initializer-name> ::= ?__E <name> YAXXZ
mangleInitFiniStub(D, Out, 'E');
}
void
MicrosoftMangleContextImpl::mangleDynamicAtExitDestructor(const VarDecl *D,
raw_ostream &Out) {
// <destructor-name> ::= ?__F <name> YAXXZ
mangleInitFiniStub(D, Out, 'F');
}
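// Example (illustrative): a dynamically initialized global 'int x = g();'
// receives the initializer stub "??__Ex@@YAXXZ" and, when it needs atexit
// teardown, the destructor stub "??__Fx@@YAXXZ".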
void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL,
raw_ostream &Out) {
// <char-type> ::= 0 # char
// ::= 1 # wchar_t
// ::= ??? # char16_t/char32_t will need a mangling too...
//
// <literal-length> ::= <non-negative integer> # the length of the literal
//
// <encoded-crc> ::= <hex digit>+ @ # crc of the literal including
// # null-terminator
//
// <encoded-string> ::= <simple character> # uninteresting character
// ::= '?$' <hex digit> <hex digit> # these two nibbles
// # encode the byte for the
// # character
// ::= '?' [a-z] # \xe1 - \xfa
// ::= '?' [A-Z] # \xc1 - \xda
// ::= '?' [0-9] # [,/\:. \n\t'-]
//
// <literal> ::= '??_C@_' <char-type> <literal-length> <encoded-crc>
// <encoded-string> '@'
MicrosoftCXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "\01??_C@_";
// <char-type>: The "kind" of string literal is encoded into the mangled name.
if (SL->isWide())
Mangler.getStream() << '1';
else
Mangler.getStream() << '0';
// <literal-length>: The next part of the mangled name consists of the length
// of the string.
// The StringLiteral does not consider the NUL terminator byte(s) but the
// mangling does.
// N.B. The length is in terms of bytes, not characters.
Mangler.mangleNumber(SL->getByteLength() + SL->getCharByteWidth());
// We will use the "Rocksoft^tm Model CRC Algorithm" to describe the
// properties of our CRC:
// Width : 32
// Poly : 04C11DB7
// Init : FFFFFFFF
// RefIn : True
// RefOut : True
// XorOut : 00000000
// Check : 340BC6D9
uint32_t CRC = 0xFFFFFFFFU;
auto UpdateCRC = [&CRC](char Byte) {
for (unsigned i = 0; i < 8; ++i) {
bool Bit = CRC & 0x80000000U;
if (Byte & (1U << i))
Bit = !Bit;
CRC <<= 1;
if (Bit)
CRC ^= 0x04C11DB7U;
}
};
auto GetLittleEndianByte = [&SL](unsigned Index) {
unsigned CharByteWidth = SL->getCharByteWidth();
uint32_t CodeUnit = SL->getCodeUnit(Index / CharByteWidth);
unsigned OffsetInCodeUnit = Index % CharByteWidth;
return static_cast<char>((CodeUnit >> (8 * OffsetInCodeUnit)) & 0xff);
};
auto GetBigEndianByte = [&SL](unsigned Index) {
unsigned CharByteWidth = SL->getCharByteWidth();
uint32_t CodeUnit = SL->getCodeUnit(Index / CharByteWidth);
unsigned OffsetInCodeUnit = (CharByteWidth - 1) - (Index % CharByteWidth);
return static_cast<char>((CodeUnit >> (8 * OffsetInCodeUnit)) & 0xff);
};
// CRC all the bytes of the StringLiteral.
for (unsigned I = 0, E = SL->getByteLength(); I != E; ++I)
UpdateCRC(GetLittleEndianByte(I));
  // The NUL terminator byte(s) were not present earlier,
  // so we need to manually process those bytes into the CRC.
for (unsigned NullTerminator = 0; NullTerminator < SL->getCharByteWidth();
++NullTerminator)
UpdateCRC('\x00');
// The literature refers to the process of reversing the bits in the final CRC
// output as "reflection".
CRC = llvm::reverseBits(CRC);
// <encoded-crc>: The CRC is encoded utilizing the standard number mangling
// scheme.
Mangler.mangleNumber(CRC);
// <encoded-string>: The mangled name also contains the first 32 _characters_
// (including null-terminator bytes) of the StringLiteral.
// Each character is encoded by splitting them into bytes and then encoding
// the constituent bytes.
auto MangleByte = [&Mangler](char Byte) {
// There are five different manglings for characters:
// - [a-zA-Z0-9_$]: A one-to-one mapping.
// - ?[a-z]: The range from \xe1 to \xfa.
// - ?[A-Z]: The range from \xc1 to \xda.
// - ?[0-9]: The set of [,/\:. \n\t'-].
// - ?$XX: A fallback which maps nibbles.
if (isIdentifierBody(Byte, /*AllowDollar=*/true)) {
Mangler.getStream() << Byte;
} else if (isLetter(Byte & 0x7f)) {
Mangler.getStream() << '?' << static_cast<char>(Byte & 0x7f);
} else {
const char SpecialChars[] = {',', '/', '\\', ':', '.',
' ', '\n', '\t', '\'', '-'};
const char *Pos =
std::find(std::begin(SpecialChars), std::end(SpecialChars), Byte);
if (Pos != std::end(SpecialChars)) {
Mangler.getStream() << '?' << (Pos - std::begin(SpecialChars));
} else {
Mangler.getStream() << "?$";
Mangler.getStream() << static_cast<char>('A' + ((Byte >> 4) & 0xf));
Mangler.getStream() << static_cast<char>('A' + (Byte & 0xf));
}
}
};
// Enforce our 32 character max.
unsigned NumCharsToMangle = std::min(32U, SL->getLength());
for (unsigned I = 0, E = NumCharsToMangle * SL->getCharByteWidth(); I != E;
++I)
if (SL->isWide())
MangleByte(GetBigEndianByte(I));
else
MangleByte(GetLittleEndianByte(I));
// Encode the NUL terminator if there is room.
if (NumCharsToMangle < 32)
for (unsigned NullTerminator = 0; NullTerminator < SL->getCharByteWidth();
++NullTerminator)
MangleByte(0);
Mangler.getStream() << '@';
}
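// Worked example (illustrative; the CRC is left symbolic since it depends on
// the literal's bytes): the narrow literal "a" mangles as
// "??_C@_01<crc>@a?$AA@": char-type '0', length 2 (one byte plus the NUL)
// encoded as '1', the CRC, the character itself, and the NUL byte as "?$AA".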
void MicrosoftMangleContextImpl::mangleCXXVTableBitSet(const CXXRecordDecl *RD,
raw_ostream &Out) {
if (!RD->isExternallyVisible()) {
// This part of the identifier needs to be unique across all translation
// units in the linked program. The scheme fails if multiple translation
// units are compiled using the same relative source file path, or if
// multiple translation units are built from the same source file.
SourceManager &SM = getASTContext().getSourceManager();
Out << "[" << SM.getFileEntryForID(SM.getMainFileID())->getName() << "]";
}
MicrosoftCXXNameMangler mangler(*this, Out);
mangler.mangleName(RD);
}
MicrosoftMangleContext *
MicrosoftMangleContext::create(ASTContext &Context, DiagnosticsEngine &Diags) {
return new MicrosoftMangleContextImpl(Context, Diags);
}
//===--- ASTImporter.cpp - Importing ASTs from other Contexts ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ASTImporter class which imports AST nodes from one
// context into another context.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTImporter.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclVisitor.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/TypeVisitor.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/Support/MemoryBuffer.h"
#include <deque>
namespace clang {
class ASTNodeImporter : public TypeVisitor<ASTNodeImporter, QualType>,
public DeclVisitor<ASTNodeImporter, Decl *>,
public StmtVisitor<ASTNodeImporter, Stmt *> {
ASTImporter &Importer;
public:
explicit ASTNodeImporter(ASTImporter &Importer) : Importer(Importer) { }
using TypeVisitor<ASTNodeImporter, QualType>::Visit;
using DeclVisitor<ASTNodeImporter, Decl *>::Visit;
using StmtVisitor<ASTNodeImporter, Stmt *>::Visit;
// Importing types
QualType VisitType(const Type *T);
QualType VisitBuiltinType(const BuiltinType *T);
QualType VisitComplexType(const ComplexType *T);
QualType VisitPointerType(const PointerType *T);
QualType VisitBlockPointerType(const BlockPointerType *T);
QualType VisitLValueReferenceType(const LValueReferenceType *T);
QualType VisitRValueReferenceType(const RValueReferenceType *T);
QualType VisitMemberPointerType(const MemberPointerType *T);
QualType VisitConstantArrayType(const ConstantArrayType *T);
QualType VisitIncompleteArrayType(const IncompleteArrayType *T);
QualType VisitVariableArrayType(const VariableArrayType *T);
// FIXME: DependentSizedArrayType
// FIXME: DependentSizedExtVectorType
QualType VisitVectorType(const VectorType *T);
QualType VisitExtVectorType(const ExtVectorType *T);
QualType VisitFunctionNoProtoType(const FunctionNoProtoType *T);
QualType VisitFunctionProtoType(const FunctionProtoType *T);
// FIXME: UnresolvedUsingType
QualType VisitParenType(const ParenType *T);
QualType VisitTypedefType(const TypedefType *T);
QualType VisitTypeOfExprType(const TypeOfExprType *T);
// FIXME: DependentTypeOfExprType
QualType VisitTypeOfType(const TypeOfType *T);
QualType VisitDecltypeType(const DecltypeType *T);
QualType VisitUnaryTransformType(const UnaryTransformType *T);
QualType VisitAutoType(const AutoType *T);
// FIXME: DependentDecltypeType
QualType VisitRecordType(const RecordType *T);
QualType VisitEnumType(const EnumType *T);
QualType VisitAttributedType(const AttributedType *T);
// FIXME: TemplateTypeParmType
// FIXME: SubstTemplateTypeParmType
QualType VisitTemplateSpecializationType(const TemplateSpecializationType *T);
QualType VisitElaboratedType(const ElaboratedType *T);
// FIXME: DependentNameType
// FIXME: DependentTemplateSpecializationType
QualType VisitObjCInterfaceType(const ObjCInterfaceType *T);
QualType VisitObjCObjectType(const ObjCObjectType *T);
QualType VisitObjCObjectPointerType(const ObjCObjectPointerType *T);
// Importing declarations
bool ImportDeclParts(NamedDecl *D, DeclContext *&DC,
DeclContext *&LexicalDC, DeclarationName &Name,
NamedDecl *&ToD, SourceLocation &Loc);
void ImportDefinitionIfNeeded(Decl *FromD, Decl *ToD = nullptr);
void ImportDeclarationNameLoc(const DeclarationNameInfo &From,
DeclarationNameInfo& To);
void ImportDeclContext(DeclContext *FromDC, bool ForceImport = false);
/// \brief What we should import from the definition.
enum ImportDefinitionKind {
/// \brief Import the default subset of the definition, which might be
/// nothing (if minimal import is set) or might be everything (if minimal
/// import is not set).
IDK_Default,
/// \brief Import everything.
IDK_Everything,
/// \brief Import only the bare bones needed to establish a valid
/// DeclContext.
IDK_Basic
};
bool shouldForceImportDeclContext(ImportDefinitionKind IDK) {
return IDK == IDK_Everything ||
(IDK == IDK_Default && !Importer.isMinimalImport());
}
bool ImportDefinition(RecordDecl *From, RecordDecl *To,
ImportDefinitionKind Kind = IDK_Default);
bool ImportDefinition(VarDecl *From, VarDecl *To,
ImportDefinitionKind Kind = IDK_Default);
bool ImportDefinition(EnumDecl *From, EnumDecl *To,
ImportDefinitionKind Kind = IDK_Default);
bool ImportDefinition(ObjCInterfaceDecl *From, ObjCInterfaceDecl *To,
ImportDefinitionKind Kind = IDK_Default);
bool ImportDefinition(ObjCProtocolDecl *From, ObjCProtocolDecl *To,
ImportDefinitionKind Kind = IDK_Default);
TemplateParameterList *ImportTemplateParameterList(
TemplateParameterList *Params);
TemplateArgument ImportTemplateArgument(const TemplateArgument &From);
bool ImportTemplateArguments(const TemplateArgument *FromArgs,
unsigned NumFromArgs,
SmallVectorImpl<TemplateArgument> &ToArgs);
bool IsStructuralMatch(RecordDecl *FromRecord, RecordDecl *ToRecord,
bool Complain = true);
bool IsStructuralMatch(VarDecl *FromVar, VarDecl *ToVar,
bool Complain = true);
bool IsStructuralMatch(EnumDecl *FromEnum, EnumDecl *ToRecord);
bool IsStructuralMatch(EnumConstantDecl *FromEC, EnumConstantDecl *ToEC);
bool IsStructuralMatch(ClassTemplateDecl *From, ClassTemplateDecl *To);
bool IsStructuralMatch(VarTemplateDecl *From, VarTemplateDecl *To);
Decl *VisitDecl(Decl *D);
Decl *VisitTranslationUnitDecl(TranslationUnitDecl *D);
Decl *VisitNamespaceDecl(NamespaceDecl *D);
Decl *VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias);
Decl *VisitTypedefDecl(TypedefDecl *D);
Decl *VisitTypeAliasDecl(TypeAliasDecl *D);
Decl *VisitEnumDecl(EnumDecl *D);
Decl *VisitRecordDecl(RecordDecl *D);
Decl *VisitEnumConstantDecl(EnumConstantDecl *D);
Decl *VisitFunctionDecl(FunctionDecl *D);
Decl *VisitCXXMethodDecl(CXXMethodDecl *D);
Decl *VisitCXXConstructorDecl(CXXConstructorDecl *D);
Decl *VisitCXXDestructorDecl(CXXDestructorDecl *D);
Decl *VisitCXXConversionDecl(CXXConversionDecl *D);
Decl *VisitFieldDecl(FieldDecl *D);
Decl *VisitIndirectFieldDecl(IndirectFieldDecl *D);
Decl *VisitObjCIvarDecl(ObjCIvarDecl *D);
Decl *VisitVarDecl(VarDecl *D);
Decl *VisitImplicitParamDecl(ImplicitParamDecl *D);
Decl *VisitParmVarDecl(ParmVarDecl *D);
Decl *VisitObjCMethodDecl(ObjCMethodDecl *D);
Decl *VisitObjCTypeParamDecl(ObjCTypeParamDecl *D);
Decl *VisitObjCCategoryDecl(ObjCCategoryDecl *D);
Decl *VisitObjCProtocolDecl(ObjCProtocolDecl *D);
Decl *VisitLinkageSpecDecl(LinkageSpecDecl *D);
ObjCTypeParamList *ImportObjCTypeParamList(ObjCTypeParamList *list);
Decl *VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
Decl *VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D);
Decl *VisitObjCImplementationDecl(ObjCImplementationDecl *D);
Decl *VisitObjCPropertyDecl(ObjCPropertyDecl *D);
Decl *VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D);
Decl *VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D);
Decl *VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
Decl *VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D);
Decl *VisitClassTemplateDecl(ClassTemplateDecl *D);
Decl *VisitClassTemplateSpecializationDecl(
ClassTemplateSpecializationDecl *D);
Decl *VisitVarTemplateDecl(VarTemplateDecl *D);
Decl *VisitVarTemplateSpecializationDecl(VarTemplateSpecializationDecl *D);
// Importing statements
DeclGroupRef ImportDeclGroup(DeclGroupRef DG);
Stmt *VisitStmt(Stmt *S);
Stmt *VisitDeclStmt(DeclStmt *S);
Stmt *VisitNullStmt(NullStmt *S);
Stmt *VisitCompoundStmt(CompoundStmt *S);
Stmt *VisitCaseStmt(CaseStmt *S);
Stmt *VisitDefaultStmt(DefaultStmt *S);
Stmt *VisitLabelStmt(LabelStmt *S);
Stmt *VisitAttributedStmt(AttributedStmt *S);
Stmt *VisitIfStmt(IfStmt *S);
Stmt *VisitSwitchStmt(SwitchStmt *S);
Stmt *VisitWhileStmt(WhileStmt *S);
Stmt *VisitDoStmt(DoStmt *S);
Stmt *VisitForStmt(ForStmt *S);
Stmt *VisitGotoStmt(GotoStmt *S);
Stmt *VisitIndirectGotoStmt(IndirectGotoStmt *S);
Stmt *VisitContinueStmt(ContinueStmt *S);
Stmt *VisitBreakStmt(BreakStmt *S);
Stmt *VisitReturnStmt(ReturnStmt *S);
// FIXME: GCCAsmStmt
// FIXME: MSAsmStmt
// FIXME: SEHExceptStmt
// FIXME: SEHFinallyStmt
// FIXME: SEHTryStmt
// FIXME: SEHLeaveStmt
// FIXME: CapturedStmt
Stmt *VisitCXXCatchStmt(CXXCatchStmt *S);
Stmt *VisitCXXTryStmt(CXXTryStmt *S);
Stmt *VisitCXXForRangeStmt(CXXForRangeStmt *S);
// FIXME: MSDependentExistsStmt
Stmt *VisitObjCForCollectionStmt(ObjCForCollectionStmt *S);
Stmt *VisitObjCAtCatchStmt(ObjCAtCatchStmt *S);
Stmt *VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S);
Stmt *VisitObjCAtTryStmt(ObjCAtTryStmt *S);
Stmt *VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S);
Stmt *VisitObjCAtThrowStmt(ObjCAtThrowStmt *S);
Stmt *VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S);
// Importing expressions
Expr *VisitExpr(Expr *E);
Expr *VisitDeclRefExpr(DeclRefExpr *E);
Expr *VisitIntegerLiteral(IntegerLiteral *E);
Expr *VisitCharacterLiteral(CharacterLiteral *E);
Expr *VisitParenExpr(ParenExpr *E);
Expr *VisitUnaryOperator(UnaryOperator *E);
Expr *VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E);
Expr *VisitBinaryOperator(BinaryOperator *E);
Expr *VisitCompoundAssignOperator(CompoundAssignOperator *E);
Expr *VisitImplicitCastExpr(ImplicitCastExpr *E);
Expr *VisitCStyleCastExpr(CStyleCastExpr *E);
Expr *VisitCXXConstructExpr(CXXConstructExpr *E);
Expr *VisitMemberExpr(MemberExpr *E);
Expr *VisitCallExpr(CallExpr *E);
};
}
using namespace clang;
//----------------------------------------------------------------------------
// Structural Equivalence
//----------------------------------------------------------------------------
namespace {
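/// \brief Context used when checking whether declarations and types from two
/// different AST contexts are structurally equivalent.
///
/// A minimal usage sketch:
/// \code
///   llvm::DenseSet<std::pair<Decl *, Decl *> > NonEquivalent;
///   StructuralEquivalenceContext Ctx(C1, C2, NonEquivalent);
///   bool Same = Ctx.IsStructurallyEquivalent(D1, D2);
/// \endcode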
struct StructuralEquivalenceContext {
/// \brief AST contexts for which we are checking structural equivalence.
ASTContext &C1, &C2;
/// \brief The set of "tentative" equivalences between two canonical
/// declarations, mapping from a declaration in the first context to the
/// declaration in the second context that we believe to be equivalent.
llvm::DenseMap<Decl *, Decl *> TentativeEquivalences;
/// \brief Queue of declarations in the first context whose equivalence
/// with a declaration in the second context still needs to be verified.
std::deque<Decl *> DeclsToCheck;
/// \brief Declaration (from, to) pairs that are known not to be equivalent
/// (which we have already complained about).
llvm::DenseSet<std::pair<Decl *, Decl *> > &NonEquivalentDecls;
/// \brief Whether we're being strict about the spelling of types when
/// unifying two types.
bool StrictTypeSpelling;
/// \brief Whether to complain about failures.
bool Complain;
/// \brief \c true if the last diagnostic came from C2.
bool LastDiagFromC2;
StructuralEquivalenceContext(ASTContext &C1, ASTContext &C2,
llvm::DenseSet<std::pair<Decl *, Decl *> > &NonEquivalentDecls,
bool StrictTypeSpelling = false,
bool Complain = true)
: C1(C1), C2(C2), NonEquivalentDecls(NonEquivalentDecls),
StrictTypeSpelling(StrictTypeSpelling), Complain(Complain),
LastDiagFromC2(false) {}
/// \brief Determine whether the two declarations are structurally
/// equivalent.
bool IsStructurallyEquivalent(Decl *D1, Decl *D2);
/// \brief Determine whether the two types are structurally equivalent.
bool IsStructurallyEquivalent(QualType T1, QualType T2);
private:
/// \brief Finish checking all of the structural equivalences.
///
/// \returns true if an error occurred, false otherwise.
bool Finish();
public:
DiagnosticBuilder Diag1(SourceLocation Loc, unsigned DiagID) {
assert(Complain && "Not allowed to complain");
if (LastDiagFromC2)
C1.getDiagnostics().notePriorDiagnosticFrom(C2.getDiagnostics());
LastDiagFromC2 = false;
return C1.getDiagnostics().Report(Loc, DiagID);
}
DiagnosticBuilder Diag2(SourceLocation Loc, unsigned DiagID) {
assert(Complain && "Not allowed to complain");
if (!LastDiagFromC2)
C2.getDiagnostics().notePriorDiagnosticFrom(C1.getDiagnostics());
LastDiagFromC2 = true;
return C2.getDiagnostics().Report(Loc, DiagID);
}
};
}
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
QualType T1, QualType T2);
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Decl *D1, Decl *D2);
/// \brief Determine structural equivalence of two expressions.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Expr *E1, Expr *E2) {
if (!E1 || !E2)
return E1 == E2;
// FIXME: Actually perform a structural comparison!
return true;
}
/// \brief Determine whether two identifiers are equivalent.
static bool IsStructurallyEquivalent(const IdentifierInfo *Name1,
const IdentifierInfo *Name2) {
if (!Name1 || !Name2)
return Name1 == Name2;
return Name1->getName() == Name2->getName();
}
/// \brief Determine whether two nested-name-specifiers are equivalent.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
NestedNameSpecifier *NNS1,
NestedNameSpecifier *NNS2) {
// FIXME: Implement!
return true;
}
/// \brief Determine whether two template arguments are equivalent.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
const TemplateArgument &Arg1,
const TemplateArgument &Arg2) {
if (Arg1.getKind() != Arg2.getKind())
return false;
switch (Arg1.getKind()) {
case TemplateArgument::Null:
return true;
case TemplateArgument::Type:
return Context.IsStructurallyEquivalent(Arg1.getAsType(), Arg2.getAsType());
case TemplateArgument::Integral:
if (!Context.IsStructurallyEquivalent(Arg1.getIntegralType(),
Arg2.getIntegralType()))
return false;
return llvm::APSInt::isSameValue(Arg1.getAsIntegral(), Arg2.getAsIntegral());
case TemplateArgument::Declaration:
return Context.IsStructurallyEquivalent(Arg1.getAsDecl(), Arg2.getAsDecl());
case TemplateArgument::NullPtr:
return true; // FIXME: Is this correct?
case TemplateArgument::Template:
return IsStructurallyEquivalent(Context,
Arg1.getAsTemplate(),
Arg2.getAsTemplate());
case TemplateArgument::TemplateExpansion:
return IsStructurallyEquivalent(Context,
Arg1.getAsTemplateOrTemplatePattern(),
Arg2.getAsTemplateOrTemplatePattern());
case TemplateArgument::Expression:
return IsStructurallyEquivalent(Context,
Arg1.getAsExpr(), Arg2.getAsExpr());
case TemplateArgument::Pack:
if (Arg1.pack_size() != Arg2.pack_size())
return false;
for (unsigned I = 0, N = Arg1.pack_size(); I != N; ++I)
if (!IsStructurallyEquivalent(Context,
Arg1.pack_begin()[I],
Arg2.pack_begin()[I]))
return false;
return true;
}
llvm_unreachable("Invalid template argument kind");
}
/// \brief Determine structural equivalence for the common part of array
/// types.
static bool IsArrayStructurallyEquivalent(StructuralEquivalenceContext &Context,
const ArrayType *Array1,
const ArrayType *Array2) {
if (!IsStructurallyEquivalent(Context,
Array1->getElementType(),
Array2->getElementType()))
return false;
if (Array1->getSizeModifier() != Array2->getSizeModifier())
return false;
if (Array1->getIndexTypeQualifiers() != Array2->getIndexTypeQualifiers())
return false;
return true;
}
/// \brief Determine structural equivalence of two types.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
QualType T1, QualType T2) {
if (T1.isNull() || T2.isNull())
return T1.isNull() && T2.isNull();
if (!Context.StrictTypeSpelling) {
// We aren't being strict about token-to-token equivalence of types,
// so map down to the canonical type.
T1 = Context.C1.getCanonicalType(T1);
T2 = Context.C2.getCanonicalType(T2);
}
if (T1.getQualifiers() != T2.getQualifiers())
return false;
Type::TypeClass TC = T1->getTypeClass();
if (T1->getTypeClass() != T2->getTypeClass()) {
// Compare function types with prototypes vs. without prototypes as if
// both did not have prototypes.
if (T1->getTypeClass() == Type::FunctionProto &&
T2->getTypeClass() == Type::FunctionNoProto)
TC = Type::FunctionNoProto;
else if (T1->getTypeClass() == Type::FunctionNoProto &&
T2->getTypeClass() == Type::FunctionProto)
TC = Type::FunctionNoProto;
else
return false;
}
switch (TC) {
case Type::Builtin:
// FIXME: Deal with Char_S/Char_U.
if (cast<BuiltinType>(T1)->getKind() != cast<BuiltinType>(T2)->getKind())
return false;
break;
case Type::Complex:
if (!IsStructurallyEquivalent(Context,
cast<ComplexType>(T1)->getElementType(),
cast<ComplexType>(T2)->getElementType()))
return false;
break;
case Type::Adjusted:
case Type::Decayed:
if (!IsStructurallyEquivalent(Context,
cast<AdjustedType>(T1)->getOriginalType(),
cast<AdjustedType>(T2)->getOriginalType()))
return false;
break;
case Type::Pointer:
if (!IsStructurallyEquivalent(Context,
cast<PointerType>(T1)->getPointeeType(),
cast<PointerType>(T2)->getPointeeType()))
return false;
break;
case Type::BlockPointer:
if (!IsStructurallyEquivalent(Context,
cast<BlockPointerType>(T1)->getPointeeType(),
cast<BlockPointerType>(T2)->getPointeeType()))
return false;
break;
case Type::LValueReference:
case Type::RValueReference: {
const ReferenceType *Ref1 = cast<ReferenceType>(T1);
const ReferenceType *Ref2 = cast<ReferenceType>(T2);
if (Ref1->isSpelledAsLValue() != Ref2->isSpelledAsLValue())
return false;
if (Ref1->isInnerRef() != Ref2->isInnerRef())
return false;
if (!IsStructurallyEquivalent(Context,
Ref1->getPointeeTypeAsWritten(),
Ref2->getPointeeTypeAsWritten()))
return false;
break;
}
case Type::MemberPointer: {
const MemberPointerType *MemPtr1 = cast<MemberPointerType>(T1);
const MemberPointerType *MemPtr2 = cast<MemberPointerType>(T2);
if (!IsStructurallyEquivalent(Context,
MemPtr1->getPointeeType(),
MemPtr2->getPointeeType()))
return false;
if (!IsStructurallyEquivalent(Context,
QualType(MemPtr1->getClass(), 0),
QualType(MemPtr2->getClass(), 0)))
return false;
break;
}
case Type::ConstantArray: {
const ConstantArrayType *Array1 = cast<ConstantArrayType>(T1);
const ConstantArrayType *Array2 = cast<ConstantArrayType>(T2);
if (!llvm::APInt::isSameValue(Array1->getSize(), Array2->getSize()))
return false;
if (!IsArrayStructurallyEquivalent(Context, Array1, Array2))
return false;
break;
}
case Type::IncompleteArray:
if (!IsArrayStructurallyEquivalent(Context,
cast<ArrayType>(T1),
cast<ArrayType>(T2)))
return false;
break;
case Type::VariableArray: {
const VariableArrayType *Array1 = cast<VariableArrayType>(T1);
const VariableArrayType *Array2 = cast<VariableArrayType>(T2);
if (!IsStructurallyEquivalent(Context,
Array1->getSizeExpr(), Array2->getSizeExpr()))
return false;
if (!IsArrayStructurallyEquivalent(Context, Array1, Array2))
return false;
break;
}
case Type::DependentSizedArray: {
const DependentSizedArrayType *Array1 = cast<DependentSizedArrayType>(T1);
const DependentSizedArrayType *Array2 = cast<DependentSizedArrayType>(T2);
if (!IsStructurallyEquivalent(Context,
Array1->getSizeExpr(), Array2->getSizeExpr()))
return false;
if (!IsArrayStructurallyEquivalent(Context, Array1, Array2))
return false;
break;
}
case Type::DependentSizedExtVector: {
const DependentSizedExtVectorType *Vec1
= cast<DependentSizedExtVectorType>(T1);
const DependentSizedExtVectorType *Vec2
= cast<DependentSizedExtVectorType>(T2);
if (!IsStructurallyEquivalent(Context,
Vec1->getSizeExpr(), Vec2->getSizeExpr()))
return false;
if (!IsStructurallyEquivalent(Context,
Vec1->getElementType(),
Vec2->getElementType()))
return false;
break;
}
case Type::Vector:
case Type::ExtVector: {
const VectorType *Vec1 = cast<VectorType>(T1);
const VectorType *Vec2 = cast<VectorType>(T2);
if (!IsStructurallyEquivalent(Context,
Vec1->getElementType(),
Vec2->getElementType()))
return false;
if (Vec1->getNumElements() != Vec2->getNumElements())
return false;
if (Vec1->getVectorKind() != Vec2->getVectorKind())
return false;
break;
}
case Type::FunctionProto: {
const FunctionProtoType *Proto1 = cast<FunctionProtoType>(T1);
const FunctionProtoType *Proto2 = cast<FunctionProtoType>(T2);
if (Proto1->getNumParams() != Proto2->getNumParams())
return false;
for (unsigned I = 0, N = Proto1->getNumParams(); I != N; ++I) {
if (!IsStructurallyEquivalent(Context, Proto1->getParamType(I),
Proto2->getParamType(I)))
return false;
}
if (Proto1->isVariadic() != Proto2->isVariadic())
return false;
if (Proto1->getExceptionSpecType() != Proto2->getExceptionSpecType())
return false;
if (Proto1->getExceptionSpecType() == EST_Dynamic) {
if (Proto1->getNumExceptions() != Proto2->getNumExceptions())
return false;
for (unsigned I = 0, N = Proto1->getNumExceptions(); I != N; ++I) {
if (!IsStructurallyEquivalent(Context,
Proto1->getExceptionType(I),
Proto2->getExceptionType(I)))
return false;
}
} else if (Proto1->getExceptionSpecType() == EST_ComputedNoexcept) {
if (!IsStructurallyEquivalent(Context,
Proto1->getNoexceptExpr(),
Proto2->getNoexceptExpr()))
return false;
}
if (Proto1->getTypeQuals() != Proto2->getTypeQuals())
return false;
// Fall through to check the bits common with FunctionNoProtoType.
LLVM_FALLTHROUGH; // HLSL Change
}
case Type::FunctionNoProto: {
const FunctionType *Function1 = cast<FunctionType>(T1);
const FunctionType *Function2 = cast<FunctionType>(T2);
if (!IsStructurallyEquivalent(Context, Function1->getReturnType(),
Function2->getReturnType()))
return false;
if (Function1->getExtInfo() != Function2->getExtInfo())
return false;
break;
}
case Type::UnresolvedUsing:
if (!IsStructurallyEquivalent(Context,
cast<UnresolvedUsingType>(T1)->getDecl(),
cast<UnresolvedUsingType>(T2)->getDecl()))
return false;
break;
case Type::Attributed:
if (!IsStructurallyEquivalent(Context,
cast<AttributedType>(T1)->getModifiedType(),
cast<AttributedType>(T2)->getModifiedType()))
return false;
if (!IsStructurallyEquivalent(Context,
cast<AttributedType>(T1)->getEquivalentType(),
cast<AttributedType>(T2)->getEquivalentType()))
return false;
break;
case Type::Paren:
if (!IsStructurallyEquivalent(Context,
cast<ParenType>(T1)->getInnerType(),
cast<ParenType>(T2)->getInnerType()))
return false;
break;
case Type::Typedef:
if (!IsStructurallyEquivalent(Context,
cast<TypedefType>(T1)->getDecl(),
cast<TypedefType>(T2)->getDecl()))
return false;
break;
case Type::TypeOfExpr:
if (!IsStructurallyEquivalent(Context,
cast<TypeOfExprType>(T1)->getUnderlyingExpr(),
cast<TypeOfExprType>(T2)->getUnderlyingExpr()))
return false;
break;
case Type::TypeOf:
if (!IsStructurallyEquivalent(Context,
cast<TypeOfType>(T1)->getUnderlyingType(),
cast<TypeOfType>(T2)->getUnderlyingType()))
return false;
break;
case Type::UnaryTransform:
if (!IsStructurallyEquivalent(Context,
                                  cast<UnaryTransformType>(T1)->getUnderlyingType(),
                                  cast<UnaryTransformType>(T2)->getUnderlyingType()))
return false;
break;
case Type::Decltype:
if (!IsStructurallyEquivalent(Context,
cast<DecltypeType>(T1)->getUnderlyingExpr(),
cast<DecltypeType>(T2)->getUnderlyingExpr()))
return false;
break;
case Type::Auto:
if (!IsStructurallyEquivalent(Context,
cast<AutoType>(T1)->getDeducedType(),
cast<AutoType>(T2)->getDeducedType()))
return false;
break;
case Type::Record:
case Type::Enum:
if (!IsStructurallyEquivalent(Context,
cast<TagType>(T1)->getDecl(),
cast<TagType>(T2)->getDecl()))
return false;
break;
case Type::TemplateTypeParm: {
const TemplateTypeParmType *Parm1 = cast<TemplateTypeParmType>(T1);
const TemplateTypeParmType *Parm2 = cast<TemplateTypeParmType>(T2);
if (Parm1->getDepth() != Parm2->getDepth())
return false;
if (Parm1->getIndex() != Parm2->getIndex())
return false;
if (Parm1->isParameterPack() != Parm2->isParameterPack())
return false;
// Names of template type parameters are never significant.
break;
}
case Type::SubstTemplateTypeParm: {
const SubstTemplateTypeParmType *Subst1
= cast<SubstTemplateTypeParmType>(T1);
const SubstTemplateTypeParmType *Subst2
= cast<SubstTemplateTypeParmType>(T2);
if (!IsStructurallyEquivalent(Context,
QualType(Subst1->getReplacedParameter(), 0),
QualType(Subst2->getReplacedParameter(), 0)))
return false;
if (!IsStructurallyEquivalent(Context,
Subst1->getReplacementType(),
Subst2->getReplacementType()))
return false;
break;
}
case Type::SubstTemplateTypeParmPack: {
const SubstTemplateTypeParmPackType *Subst1
= cast<SubstTemplateTypeParmPackType>(T1);
const SubstTemplateTypeParmPackType *Subst2
= cast<SubstTemplateTypeParmPackType>(T2);
if (!IsStructurallyEquivalent(Context,
QualType(Subst1->getReplacedParameter(), 0),
QualType(Subst2->getReplacedParameter(), 0)))
return false;
if (!IsStructurallyEquivalent(Context,
Subst1->getArgumentPack(),
Subst2->getArgumentPack()))
return false;
break;
}
case Type::TemplateSpecialization: {
const TemplateSpecializationType *Spec1
= cast<TemplateSpecializationType>(T1);
const TemplateSpecializationType *Spec2
= cast<TemplateSpecializationType>(T2);
if (!IsStructurallyEquivalent(Context,
Spec1->getTemplateName(),
Spec2->getTemplateName()))
return false;
if (Spec1->getNumArgs() != Spec2->getNumArgs())
return false;
for (unsigned I = 0, N = Spec1->getNumArgs(); I != N; ++I) {
if (!IsStructurallyEquivalent(Context,
Spec1->getArg(I), Spec2->getArg(I)))
return false;
}
break;
}
case Type::Elaborated: {
const ElaboratedType *Elab1 = cast<ElaboratedType>(T1);
const ElaboratedType *Elab2 = cast<ElaboratedType>(T2);
    // CHECKME: What if a keyword is ETK_None or ETK_typename?
if (Elab1->getKeyword() != Elab2->getKeyword())
return false;
if (!IsStructurallyEquivalent(Context,
Elab1->getQualifier(),
Elab2->getQualifier()))
return false;
if (!IsStructurallyEquivalent(Context,
Elab1->getNamedType(),
Elab2->getNamedType()))
return false;
break;
}
case Type::InjectedClassName: {
const InjectedClassNameType *Inj1 = cast<InjectedClassNameType>(T1);
const InjectedClassNameType *Inj2 = cast<InjectedClassNameType>(T2);
if (!IsStructurallyEquivalent(Context,
Inj1->getInjectedSpecializationType(),
Inj2->getInjectedSpecializationType()))
return false;
break;
}
case Type::DependentName: {
const DependentNameType *Typename1 = cast<DependentNameType>(T1);
const DependentNameType *Typename2 = cast<DependentNameType>(T2);
if (!IsStructurallyEquivalent(Context,
Typename1->getQualifier(),
Typename2->getQualifier()))
return false;
if (!IsStructurallyEquivalent(Typename1->getIdentifier(),
Typename2->getIdentifier()))
return false;
break;
}
case Type::DependentTemplateSpecialization: {
const DependentTemplateSpecializationType *Spec1 =
cast<DependentTemplateSpecializationType>(T1);
const DependentTemplateSpecializationType *Spec2 =
cast<DependentTemplateSpecializationType>(T2);
if (!IsStructurallyEquivalent(Context,
Spec1->getQualifier(),
Spec2->getQualifier()))
return false;
if (!IsStructurallyEquivalent(Spec1->getIdentifier(),
Spec2->getIdentifier()))
return false;
if (Spec1->getNumArgs() != Spec2->getNumArgs())
return false;
for (unsigned I = 0, N = Spec1->getNumArgs(); I != N; ++I) {
if (!IsStructurallyEquivalent(Context,
Spec1->getArg(I), Spec2->getArg(I)))
return false;
}
break;
}
case Type::PackExpansion:
if (!IsStructurallyEquivalent(Context,
cast<PackExpansionType>(T1)->getPattern(),
cast<PackExpansionType>(T2)->getPattern()))
return false;
break;
case Type::ObjCInterface: {
const ObjCInterfaceType *Iface1 = cast<ObjCInterfaceType>(T1);
const ObjCInterfaceType *Iface2 = cast<ObjCInterfaceType>(T2);
if (!IsStructurallyEquivalent(Context,
Iface1->getDecl(), Iface2->getDecl()))
return false;
break;
}
case Type::ObjCObject: {
const ObjCObjectType *Obj1 = cast<ObjCObjectType>(T1);
const ObjCObjectType *Obj2 = cast<ObjCObjectType>(T2);
if (!IsStructurallyEquivalent(Context,
Obj1->getBaseType(),
Obj2->getBaseType()))
return false;
if (Obj1->getNumProtocols() != Obj2->getNumProtocols())
return false;
for (unsigned I = 0, N = Obj1->getNumProtocols(); I != N; ++I) {
if (!IsStructurallyEquivalent(Context,
Obj1->getProtocol(I),
Obj2->getProtocol(I)))
return false;
}
break;
}
case Type::ObjCObjectPointer: {
const ObjCObjectPointerType *Ptr1 = cast<ObjCObjectPointerType>(T1);
const ObjCObjectPointerType *Ptr2 = cast<ObjCObjectPointerType>(T2);
if (!IsStructurallyEquivalent(Context,
Ptr1->getPointeeType(),
Ptr2->getPointeeType()))
return false;
break;
}
case Type::Atomic: {
if (!IsStructurallyEquivalent(Context,
cast<AtomicType>(T1)->getValueType(),
cast<AtomicType>(T2)->getValueType()))
return false;
break;
}
} // end switch
return true;
}
/// \brief Determine structural equivalence of two fields.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
FieldDecl *Field1, FieldDecl *Field2) {
RecordDecl *Owner2 = cast<RecordDecl>(Field2->getDeclContext());
  // For anonymous structs/unions, match up the anonymous struct/union type
  // declarations directly, so that we don't go off searching for anonymous
  // types.
if (Field1->isAnonymousStructOrUnion() &&
Field2->isAnonymousStructOrUnion()) {
RecordDecl *D1 = Field1->getType()->castAs<RecordType>()->getDecl();
RecordDecl *D2 = Field2->getType()->castAs<RecordType>()->getDecl();
return IsStructurallyEquivalent(Context, D1, D2);
}
// Check for equivalent field names.
IdentifierInfo *Name1 = Field1->getIdentifier();
IdentifierInfo *Name2 = Field2->getIdentifier();
if (!::IsStructurallyEquivalent(Name1, Name2))
return false;
if (!IsStructurallyEquivalent(Context,
Field1->getType(), Field2->getType())) {
if (Context.Complain) {
Context.Diag2(Owner2->getLocation(), diag::warn_odr_tag_type_inconsistent)
<< Context.C2.getTypeDeclType(Owner2);
Context.Diag2(Field2->getLocation(), diag::note_odr_field)
<< Field2->getDeclName() << Field2->getType();
Context.Diag1(Field1->getLocation(), diag::note_odr_field)
<< Field1->getDeclName() << Field1->getType();
}
return false;
}
if (Field1->isBitField() != Field2->isBitField()) {
if (Context.Complain) {
Context.Diag2(Owner2->getLocation(), diag::warn_odr_tag_type_inconsistent)
<< Context.C2.getTypeDeclType(Owner2);
if (Field1->isBitField()) {
Context.Diag1(Field1->getLocation(), diag::note_odr_bit_field)
<< Field1->getDeclName() << Field1->getType()
<< Field1->getBitWidthValue(Context.C1);
Context.Diag2(Field2->getLocation(), diag::note_odr_not_bit_field)
<< Field2->getDeclName();
} else {
Context.Diag2(Field2->getLocation(), diag::note_odr_bit_field)
<< Field2->getDeclName() << Field2->getType()
<< Field2->getBitWidthValue(Context.C2);
Context.Diag1(Field1->getLocation(), diag::note_odr_not_bit_field)
<< Field1->getDeclName();
}
}
return false;
}
if (Field1->isBitField()) {
// Make sure that the bit-fields are the same length.
unsigned Bits1 = Field1->getBitWidthValue(Context.C1);
unsigned Bits2 = Field2->getBitWidthValue(Context.C2);
if (Bits1 != Bits2) {
if (Context.Complain) {
Context.Diag2(Owner2->getLocation(), diag::warn_odr_tag_type_inconsistent)
<< Context.C2.getTypeDeclType(Owner2);
Context.Diag2(Field2->getLocation(), diag::note_odr_bit_field)
<< Field2->getDeclName() << Field2->getType() << Bits2;
Context.Diag1(Field1->getLocation(), diag::note_odr_bit_field)
<< Field1->getDeclName() << Field1->getType() << Bits1;
}
return false;
}
}
return true;
}
/// \brief Find the index of the given anonymous struct/union within its
/// context.
///
/// \returns The index of this anonymous struct/union in its context, or the
/// next index to be assigned if none of the existing ones match. Returns an
/// empty option if the context is not a record, i.e., if the anonymous
/// struct/union is at namespace or block scope.
static Optional<unsigned> findAnonymousStructOrUnionIndex(RecordDecl *Anon) {
ASTContext &Context = Anon->getASTContext();
QualType AnonTy = Context.getRecordType(Anon);
RecordDecl *Owner = dyn_cast<RecordDecl>(Anon->getDeclContext());
if (!Owner)
return None;
unsigned Index = 0;
for (const auto *D : Owner->noload_decls()) {
const auto *F = dyn_cast<FieldDecl>(D);
if (!F || !F->isAnonymousStructOrUnion())
continue;
if (Context.hasSameType(F->getType(), AnonTy))
break;
++Index;
}
return Index;
}
/// \brief Determine structural equivalence of two records.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
RecordDecl *D1, RecordDecl *D2) {
if (D1->isUnion() != D2->isUnion()) {
if (Context.Complain) {
Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
<< Context.C2.getTypeDeclType(D2);
Context.Diag1(D1->getLocation(), diag::note_odr_tag_kind_here)
<< D1->getDeclName() << (unsigned)D1->getTagKind();
}
return false;
}
if (D1->isAnonymousStructOrUnion() && D2->isAnonymousStructOrUnion()) {
// If both anonymous structs/unions are in a record context, make sure
// they occur in the same location in the context records.
if (Optional<unsigned> Index1 = findAnonymousStructOrUnionIndex(D1)) {
if (Optional<unsigned> Index2 = findAnonymousStructOrUnionIndex(D2)) {
if (*Index1 != *Index2)
return false;
}
}
}
// If both declarations are class template specializations, we know
// the ODR applies, so check the template and template arguments.
ClassTemplateSpecializationDecl *Spec1
= dyn_cast<ClassTemplateSpecializationDecl>(D1);
ClassTemplateSpecializationDecl *Spec2
= dyn_cast<ClassTemplateSpecializationDecl>(D2);
if (Spec1 && Spec2) {
// Check that the specialized templates are the same.
if (!IsStructurallyEquivalent(Context, Spec1->getSpecializedTemplate(),
Spec2->getSpecializedTemplate()))
return false;
// Check that the template arguments are the same.
if (Spec1->getTemplateArgs().size() != Spec2->getTemplateArgs().size())
return false;
for (unsigned I = 0, N = Spec1->getTemplateArgs().size(); I != N; ++I)
if (!IsStructurallyEquivalent(Context,
Spec1->getTemplateArgs().get(I),
Spec2->getTemplateArgs().get(I)))
return false;
}
// If one is a class template specialization and the other is not, these
// structures are different.
else if (Spec1 || Spec2)
return false;
// Compare the definitions of these two records. If either or both are
// incomplete, we assume that they are equivalent.
D1 = D1->getDefinition();
D2 = D2->getDefinition();
if (!D1 || !D2)
return true;
if (CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(D1)) {
if (CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(D2)) {
if (D1CXX->getNumBases() != D2CXX->getNumBases()) {
if (Context.Complain) {
Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
<< Context.C2.getTypeDeclType(D2);
Context.Diag2(D2->getLocation(), diag::note_odr_number_of_bases)
<< D2CXX->getNumBases();
Context.Diag1(D1->getLocation(), diag::note_odr_number_of_bases)
<< D1CXX->getNumBases();
}
return false;
}
// Check the base classes.
for (CXXRecordDecl::base_class_iterator Base1 = D1CXX->bases_begin(),
BaseEnd1 = D1CXX->bases_end(),
Base2 = D2CXX->bases_begin();
Base1 != BaseEnd1;
++Base1, ++Base2) {
if (!IsStructurallyEquivalent(Context,
Base1->getType(), Base2->getType())) {
if (Context.Complain) {
Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
<< Context.C2.getTypeDeclType(D2);
Context.Diag2(Base2->getLocStart(), diag::note_odr_base)
<< Base2->getType()
<< Base2->getSourceRange();
Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
<< Base1->getType()
<< Base1->getSourceRange();
}
return false;
}
// Check virtual vs. non-virtual inheritance mismatch.
if (Base1->isVirtual() != Base2->isVirtual()) {
if (Context.Complain) {
Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
<< Context.C2.getTypeDeclType(D2);
Context.Diag2(Base2->getLocStart(),
diag::note_odr_virtual_base)
<< Base2->isVirtual() << Base2->getSourceRange();
Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
<< Base1->isVirtual()
<< Base1->getSourceRange();
}
return false;
}
}
} else if (D1CXX->getNumBases() > 0) {
if (Context.Complain) {
Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
<< Context.C2.getTypeDeclType(D2);
const CXXBaseSpecifier *Base1 = D1CXX->bases_begin();
Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
<< Base1->getType()
<< Base1->getSourceRange();
Context.Diag2(D2->getLocation(), diag::note_odr_missing_base);
}
return false;
}
}
// Check the fields for consistency.
RecordDecl::field_iterator Field2 = D2->field_begin(),
Field2End = D2->field_end();
for (RecordDecl::field_iterator Field1 = D1->field_begin(),
Field1End = D1->field_end();
Field1 != Field1End;
++Field1, ++Field2) {
if (Field2 == Field2End) {
if (Context.Complain) {
Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
<< Context.C2.getTypeDeclType(D2);
Context.Diag1(Field1->getLocation(), diag::note_odr_field)
<< Field1->getDeclName() << Field1->getType();
Context.Diag2(D2->getLocation(), diag::note_odr_missing_field);
}
return false;
}
if (!IsStructurallyEquivalent(Context, *Field1, *Field2))
return false;
}
if (Field2 != Field2End) {
if (Context.Complain) {
Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
<< Context.C2.getTypeDeclType(D2);
Context.Diag2(Field2->getLocation(), diag::note_odr_field)
<< Field2->getDeclName() << Field2->getType();
Context.Diag1(D1->getLocation(), diag::note_odr_missing_field);
}
return false;
}
return true;
}
/// \brief Determine structural equivalence of two enums.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
EnumDecl *D1, EnumDecl *D2) {
EnumDecl::enumerator_iterator EC2 = D2->enumerator_begin(),
EC2End = D2->enumerator_end();
for (EnumDecl::enumerator_iterator EC1 = D1->enumerator_begin(),
EC1End = D1->enumerator_end();
EC1 != EC1End; ++EC1, ++EC2) {
if (EC2 == EC2End) {
if (Context.Complain) {
Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
<< Context.C2.getTypeDeclType(D2);
Context.Diag1(EC1->getLocation(), diag::note_odr_enumerator)
<< EC1->getDeclName()
<< EC1->getInitVal().toString(10);
Context.Diag2(D2->getLocation(), diag::note_odr_missing_enumerator);
}
return false;
}
llvm::APSInt Val1 = EC1->getInitVal();
llvm::APSInt Val2 = EC2->getInitVal();
if (!llvm::APSInt::isSameValue(Val1, Val2) ||
!IsStructurallyEquivalent(EC1->getIdentifier(), EC2->getIdentifier())) {
if (Context.Complain) {
Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
<< Context.C2.getTypeDeclType(D2);
Context.Diag2(EC2->getLocation(), diag::note_odr_enumerator)
<< EC2->getDeclName()
<< EC2->getInitVal().toString(10);
Context.Diag1(EC1->getLocation(), diag::note_odr_enumerator)
<< EC1->getDeclName()
<< EC1->getInitVal().toString(10);
}
return false;
}
}
if (EC2 != EC2End) {
if (Context.Complain) {
Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
<< Context.C2.getTypeDeclType(D2);
Context.Diag2(EC2->getLocation(), diag::note_odr_enumerator)
<< EC2->getDeclName()
<< EC2->getInitVal().toString(10);
Context.Diag1(D1->getLocation(), diag::note_odr_missing_enumerator);
}
return false;
}
return true;
}
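/// \brief Determine structural equivalence of two template parameter lists.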
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
TemplateParameterList *Params1,
TemplateParameterList *Params2) {
if (Params1->size() != Params2->size()) {
if (Context.Complain) {
Context.Diag2(Params2->getTemplateLoc(),
diag::err_odr_different_num_template_parameters)
<< Params1->size() << Params2->size();
Context.Diag1(Params1->getTemplateLoc(),
diag::note_odr_template_parameter_list);
}
return false;
}
for (unsigned I = 0, N = Params1->size(); I != N; ++I) {
if (Params1->getParam(I)->getKind() != Params2->getParam(I)->getKind()) {
if (Context.Complain) {
Context.Diag2(Params2->getParam(I)->getLocation(),
diag::err_odr_different_template_parameter_kind);
Context.Diag1(Params1->getParam(I)->getLocation(),
diag::note_odr_template_parameter_here);
}
return false;
}
if (!Context.IsStructurallyEquivalent(Params1->getParam(I),
Params2->getParam(I))) {
return false;
}
}
return true;
}
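/// \brief Determine structural equivalence of two template type parameters.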
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
TemplateTypeParmDecl *D1,
TemplateTypeParmDecl *D2) {
if (D1->isParameterPack() != D2->isParameterPack()) {
if (Context.Complain) {
Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
<< D2->isParameterPack();
Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
<< D1->isParameterPack();
}
return false;
}
return true;
}
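/// \brief Determine structural equivalence of two non-type template
/// parameters.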
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
NonTypeTemplateParmDecl *D1,
NonTypeTemplateParmDecl *D2) {
if (D1->isParameterPack() != D2->isParameterPack()) {
if (Context.Complain) {
Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
<< D2->isParameterPack();
Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
<< D1->isParameterPack();
}
return false;
}
// Check types.
if (!Context.IsStructurallyEquivalent(D1->getType(), D2->getType())) {
if (Context.Complain) {
Context.Diag2(D2->getLocation(),
diag::err_odr_non_type_parameter_type_inconsistent)
<< D2->getType() << D1->getType();
Context.Diag1(D1->getLocation(), diag::note_odr_value_here)
<< D1->getType();
}
return false;
}
return true;
}
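/// \brief Determine structural equivalence of two template template
/// parameters.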
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
TemplateTemplateParmDecl *D1,
TemplateTemplateParmDecl *D2) {
if (D1->isParameterPack() != D2->isParameterPack()) {
if (Context.Complain) {
Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
<< D2->isParameterPack();
Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
<< D1->isParameterPack();
}
return false;
}
// Check template parameter lists.
return IsStructurallyEquivalent(Context, D1->getTemplateParameters(),
D2->getTemplateParameters());
}
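/// \brief Determine structural equivalence of two class templates.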
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
ClassTemplateDecl *D1,
ClassTemplateDecl *D2) {
// Check template parameters.
if (!IsStructurallyEquivalent(Context,
D1->getTemplateParameters(),
D2->getTemplateParameters()))
return false;
// Check the templated declaration.
return Context.IsStructurallyEquivalent(D1->getTemplatedDecl(),
D2->getTemplatedDecl());
}
/// \brief Determine structural equivalence of two declarations.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Decl *D1, Decl *D2) {
// FIXME: Check for known structural equivalences via a callback of some sort.
// Check whether we already know that these two declarations are not
// structurally equivalent.
if (Context.NonEquivalentDecls.count(std::make_pair(D1->getCanonicalDecl(),
D2->getCanonicalDecl())))
return false;
// Determine whether we've already produced a tentative equivalence for D1.
Decl *&EquivToD1 = Context.TentativeEquivalences[D1->getCanonicalDecl()];
if (EquivToD1)
return EquivToD1 == D2->getCanonicalDecl();
// Produce a tentative equivalence D1 <-> D2, which will be checked later.
EquivToD1 = D2->getCanonicalDecl();
Context.DeclsToCheck.push_back(D1->getCanonicalDecl());
return true;
}
bool StructuralEquivalenceContext::IsStructurallyEquivalent(Decl *D1,
Decl *D2) {
if (!::IsStructurallyEquivalent(*this, D1, D2))
return false;
return !Finish();
}
bool StructuralEquivalenceContext::IsStructurallyEquivalent(QualType T1,
QualType T2) {
if (!::IsStructurallyEquivalent(*this, T1, T2))
return false;
return !Finish();
}
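/// \brief Drain the queue of tentative equivalences, performing the actual
/// kind-by-kind checks and recording any pairs found not to be equivalent.
///
/// \returns true if any tentative equivalence failed to hold.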
bool StructuralEquivalenceContext::Finish() {
while (!DeclsToCheck.empty()) {
// Check the next declaration.
Decl *D1 = DeclsToCheck.front();
DeclsToCheck.pop_front();
Decl *D2 = TentativeEquivalences[D1];
assert(D2 && "Unrecorded tentative equivalence?");
bool Equivalent = true;
// FIXME: Switch on all declaration kinds. For now, we're just going to
// check the obvious ones.
if (RecordDecl *Record1 = dyn_cast<RecordDecl>(D1)) {
if (RecordDecl *Record2 = dyn_cast<RecordDecl>(D2)) {
// Check for equivalent structure names.
IdentifierInfo *Name1 = Record1->getIdentifier();
if (!Name1 && Record1->getTypedefNameForAnonDecl())
Name1 = Record1->getTypedefNameForAnonDecl()->getIdentifier();
IdentifierInfo *Name2 = Record2->getIdentifier();
if (!Name2 && Record2->getTypedefNameForAnonDecl())
Name2 = Record2->getTypedefNameForAnonDecl()->getIdentifier();
if (!::IsStructurallyEquivalent(Name1, Name2) ||
!::IsStructurallyEquivalent(*this, Record1, Record2))
Equivalent = false;
} else {
// Record/non-record mismatch.
Equivalent = false;
}
} else if (EnumDecl *Enum1 = dyn_cast<EnumDecl>(D1)) {
if (EnumDecl *Enum2 = dyn_cast<EnumDecl>(D2)) {
// Check for equivalent enum names.
IdentifierInfo *Name1 = Enum1->getIdentifier();
if (!Name1 && Enum1->getTypedefNameForAnonDecl())
Name1 = Enum1->getTypedefNameForAnonDecl()->getIdentifier();
IdentifierInfo *Name2 = Enum2->getIdentifier();
if (!Name2 && Enum2->getTypedefNameForAnonDecl())
Name2 = Enum2->getTypedefNameForAnonDecl()->getIdentifier();
if (!::IsStructurallyEquivalent(Name1, Name2) ||
!::IsStructurallyEquivalent(*this, Enum1, Enum2))
Equivalent = false;
} else {
// Enum/non-enum mismatch
Equivalent = false;
}
} else if (TypedefNameDecl *Typedef1 = dyn_cast<TypedefNameDecl>(D1)) {
if (TypedefNameDecl *Typedef2 = dyn_cast<TypedefNameDecl>(D2)) {
if (!::IsStructurallyEquivalent(Typedef1->getIdentifier(),
Typedef2->getIdentifier()) ||
!::IsStructurallyEquivalent(*this,
Typedef1->getUnderlyingType(),
Typedef2->getUnderlyingType()))
Equivalent = false;
} else {
// Typedef/non-typedef mismatch.
Equivalent = false;
}
} else if (ClassTemplateDecl *ClassTemplate1
= dyn_cast<ClassTemplateDecl>(D1)) {
if (ClassTemplateDecl *ClassTemplate2 = dyn_cast<ClassTemplateDecl>(D2)) {
if (!::IsStructurallyEquivalent(ClassTemplate1->getIdentifier(),
ClassTemplate2->getIdentifier()) ||
!::IsStructurallyEquivalent(*this, ClassTemplate1, ClassTemplate2))
Equivalent = false;
} else {
// Class template/non-class-template mismatch.
Equivalent = false;
}
} else if (TemplateTypeParmDecl *TTP1= dyn_cast<TemplateTypeParmDecl>(D1)) {
if (TemplateTypeParmDecl *TTP2 = dyn_cast<TemplateTypeParmDecl>(D2)) {
if (!::IsStructurallyEquivalent(*this, TTP1, TTP2))
Equivalent = false;
} else {
// Kind mismatch.
Equivalent = false;
}
} else if (NonTypeTemplateParmDecl *NTTP1
= dyn_cast<NonTypeTemplateParmDecl>(D1)) {
if (NonTypeTemplateParmDecl *NTTP2
= dyn_cast<NonTypeTemplateParmDecl>(D2)) {
if (!::IsStructurallyEquivalent(*this, NTTP1, NTTP2))
Equivalent = false;
} else {
// Kind mismatch.
Equivalent = false;
}
} else if (TemplateTemplateParmDecl *TTP1
= dyn_cast<TemplateTemplateParmDecl>(D1)) {
if (TemplateTemplateParmDecl *TTP2
= dyn_cast<TemplateTemplateParmDecl>(D2)) {
if (!::IsStructurallyEquivalent(*this, TTP1, TTP2))
Equivalent = false;
} else {
// Kind mismatch.
Equivalent = false;
}
}
if (!Equivalent) {
// Note that these two declarations are not equivalent (and we already
// know about it).
NonEquivalentDecls.insert(std::make_pair(D1->getCanonicalDecl(),
D2->getCanonicalDecl()));
return true;
}
// FIXME: Check other declaration kinds!
}
return false;
}
//----------------------------------------------------------------------------
// Import Types
//----------------------------------------------------------------------------
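/// \brief Fallback for type nodes without a dedicated importer; reports
/// err_unsupported_ast_node and returns a null type.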
QualType ASTNodeImporter::VisitType(const Type *T) {
Importer.FromDiag(SourceLocation(), diag::err_unsupported_ast_node)
<< T->getTypeClassName();
return QualType();
}
QualType ASTNodeImporter::VisitBuiltinType(const BuiltinType *T) {
switch (T->getKind()) {
#define SHARED_SINGLETON_TYPE(Expansion)
#define BUILTIN_TYPE(Id, SingletonId) \
case BuiltinType::Id: return Importer.getToContext().SingletonId;
#include "clang/AST/BuiltinTypes.def"
// FIXME: for Char16, Char32, and NullPtr, make sure that the "to"
// context supports C++.
// FIXME: for ObjCId, ObjCClass, and ObjCSel, make sure that the "to"
// context supports ObjC.
case BuiltinType::Char_U:
// The context we're importing from has an unsigned 'char'. If we're
// importing into a context with a signed 'char', translate to
// 'unsigned char' instead.
if (Importer.getToContext().getLangOpts().CharIsSigned)
return Importer.getToContext().UnsignedCharTy;
return Importer.getToContext().CharTy;
case BuiltinType::Char_S:
    // The context we're importing from has a signed 'char'. If we're
    // importing into a context with an unsigned 'char', translate to
    // 'signed char' instead.
if (!Importer.getToContext().getLangOpts().CharIsSigned)
return Importer.getToContext().SignedCharTy;
return Importer.getToContext().CharTy;
case BuiltinType::WChar_S:
case BuiltinType::WChar_U:
// FIXME: If not in C++, shall we translate to the C equivalent of
// wchar_t?
return Importer.getToContext().WCharTy;
}
llvm_unreachable("Invalid BuiltinType Kind!");
}
QualType ASTNodeImporter::VisitComplexType(const ComplexType *T) {
QualType ToElementType = Importer.Import(T->getElementType());
if (ToElementType.isNull())
return QualType();
return Importer.getToContext().getComplexType(ToElementType);
}
QualType ASTNodeImporter::VisitPointerType(const PointerType *T) {
QualType ToPointeeType = Importer.Import(T->getPointeeType());
if (ToPointeeType.isNull())
return QualType();
return Importer.getToContext().getPointerType(ToPointeeType);
}
QualType ASTNodeImporter::VisitBlockPointerType(const BlockPointerType *T) {
// FIXME: Check for blocks support in "to" context.
QualType ToPointeeType = Importer.Import(T->getPointeeType());
if (ToPointeeType.isNull())
return QualType();
return Importer.getToContext().getBlockPointerType(ToPointeeType);
}
QualType
ASTNodeImporter::VisitLValueReferenceType(const LValueReferenceType *T) {
// FIXME: Check for C++ support in "to" context.
QualType ToPointeeType = Importer.Import(T->getPointeeTypeAsWritten());
if (ToPointeeType.isNull())
return QualType();
return Importer.getToContext().getLValueReferenceType(ToPointeeType);
}
QualType
ASTNodeImporter::VisitRValueReferenceType(const RValueReferenceType *T) {
// FIXME: Check for C++0x support in "to" context.
QualType ToPointeeType = Importer.Import(T->getPointeeTypeAsWritten());
if (ToPointeeType.isNull())
return QualType();
return Importer.getToContext().getRValueReferenceType(ToPointeeType);
}
QualType ASTNodeImporter::VisitMemberPointerType(const MemberPointerType *T) {
// FIXME: Check for C++ support in "to" context.
QualType ToPointeeType = Importer.Import(T->getPointeeType());
if (ToPointeeType.isNull())
return QualType();
QualType ClassType = Importer.Import(QualType(T->getClass(), 0));
return Importer.getToContext().getMemberPointerType(ToPointeeType,
ClassType.getTypePtr());
}
QualType ASTNodeImporter::VisitConstantArrayType(const ConstantArrayType *T) {
QualType ToElementType = Importer.Import(T->getElementType());
if (ToElementType.isNull())
return QualType();
return Importer.getToContext().getConstantArrayType(ToElementType,
T->getSize(),
T->getSizeModifier(),
T->getIndexTypeCVRQualifiers());
}
QualType
ASTNodeImporter::VisitIncompleteArrayType(const IncompleteArrayType *T) {
QualType ToElementType = Importer.Import(T->getElementType());
if (ToElementType.isNull())
return QualType();
return Importer.getToContext().getIncompleteArrayType(ToElementType,
T->getSizeModifier(),
T->getIndexTypeCVRQualifiers());
}
QualType ASTNodeImporter::VisitVariableArrayType(const VariableArrayType *T) {
QualType ToElementType = Importer.Import(T->getElementType());
if (ToElementType.isNull())
return QualType();
Expr *Size = Importer.Import(T->getSizeExpr());
if (!Size)
return QualType();
SourceRange Brackets = Importer.Import(T->getBracketsRange());
return Importer.getToContext().getVariableArrayType(ToElementType, Size,
T->getSizeModifier(),
T->getIndexTypeCVRQualifiers(),
Brackets);
}
QualType ASTNodeImporter::VisitVectorType(const VectorType *T) {
QualType ToElementType = Importer.Import(T->getElementType());
if (ToElementType.isNull())
return QualType();
return Importer.getToContext().getVectorType(ToElementType,
T->getNumElements(),
T->getVectorKind());
}
QualType ASTNodeImporter::VisitExtVectorType(const ExtVectorType *T) {
QualType ToElementType = Importer.Import(T->getElementType());
if (ToElementType.isNull())
return QualType();
return Importer.getToContext().getExtVectorType(ToElementType,
T->getNumElements());
}
QualType
ASTNodeImporter::VisitFunctionNoProtoType(const FunctionNoProtoType *T) {
// FIXME: What happens if we're importing a function without a prototype
// into C++? Should we make it variadic?
QualType ToResultType = Importer.Import(T->getReturnType());
if (ToResultType.isNull())
return QualType();
return Importer.getToContext().getFunctionNoProtoType(ToResultType,
T->getExtInfo());
}
QualType ASTNodeImporter::VisitFunctionProtoType(const FunctionProtoType *T) {
QualType ToResultType = Importer.Import(T->getReturnType());
if (ToResultType.isNull())
return QualType();
// Import argument types
SmallVector<QualType, 4> ArgTypes;
for (const auto &A : T->param_types()) {
QualType ArgType = Importer.Import(A);
if (ArgType.isNull())
return QualType();
ArgTypes.push_back(ArgType);
}
// Import exception types
SmallVector<QualType, 4> ExceptionTypes;
for (const auto &E : T->exceptions()) {
QualType ExceptionType = Importer.Import(E);
if (ExceptionType.isNull())
return QualType();
ExceptionTypes.push_back(ExceptionType);
}
FunctionProtoType::ExtProtoInfo FromEPI = T->getExtProtoInfo();
FunctionProtoType::ExtProtoInfo ToEPI;
ToEPI.ExtInfo = FromEPI.ExtInfo;
ToEPI.Variadic = FromEPI.Variadic;
ToEPI.HasTrailingReturn = FromEPI.HasTrailingReturn;
ToEPI.TypeQuals = FromEPI.TypeQuals;
ToEPI.RefQualifier = FromEPI.RefQualifier;
ToEPI.ExceptionSpec.Type = FromEPI.ExceptionSpec.Type;
ToEPI.ExceptionSpec.Exceptions = ExceptionTypes;
ToEPI.ExceptionSpec.NoexceptExpr =
Importer.Import(FromEPI.ExceptionSpec.NoexceptExpr);
ToEPI.ExceptionSpec.SourceDecl = cast_or_null<FunctionDecl>(
Importer.Import(FromEPI.ExceptionSpec.SourceDecl));
ToEPI.ExceptionSpec.SourceTemplate = cast_or_null<FunctionDecl>(
Importer.Import(FromEPI.ExceptionSpec.SourceTemplate));
return Importer.getToContext().getFunctionType(ToResultType, ArgTypes, ToEPI, None); // HLSL Change - add param modifiers
}
QualType ASTNodeImporter::VisitParenType(const ParenType *T) {
QualType ToInnerType = Importer.Import(T->getInnerType());
if (ToInnerType.isNull())
return QualType();
return Importer.getToContext().getParenType(ToInnerType);
}
QualType ASTNodeImporter::VisitTypedefType(const TypedefType *T) {
TypedefNameDecl *ToDecl
= dyn_cast_or_null<TypedefNameDecl>(Importer.Import(T->getDecl()));
if (!ToDecl)
return QualType();
return Importer.getToContext().getTypeDeclType(ToDecl);
}
QualType ASTNodeImporter::VisitTypeOfExprType(const TypeOfExprType *T) {
Expr *ToExpr = Importer.Import(T->getUnderlyingExpr());
if (!ToExpr)
return QualType();
return Importer.getToContext().getTypeOfExprType(ToExpr);
}
QualType ASTNodeImporter::VisitTypeOfType(const TypeOfType *T) {
QualType ToUnderlyingType = Importer.Import(T->getUnderlyingType());
if (ToUnderlyingType.isNull())
return QualType();
return Importer.getToContext().getTypeOfType(ToUnderlyingType);
}
QualType ASTNodeImporter::VisitDecltypeType(const DecltypeType *T) {
// FIXME: Make sure that the "to" context supports C++0x!
Expr *ToExpr = Importer.Import(T->getUnderlyingExpr());
if (!ToExpr)
return QualType();
QualType UnderlyingType = Importer.Import(T->getUnderlyingType());
if (UnderlyingType.isNull())
return QualType();
return Importer.getToContext().getDecltypeType(ToExpr, UnderlyingType);
}
QualType ASTNodeImporter::VisitUnaryTransformType(const UnaryTransformType *T) {
QualType ToBaseType = Importer.Import(T->getBaseType());
QualType ToUnderlyingType = Importer.Import(T->getUnderlyingType());
if (ToBaseType.isNull() || ToUnderlyingType.isNull())
return QualType();
return Importer.getToContext().getUnaryTransformType(ToBaseType,
ToUnderlyingType,
T->getUTTKind());
}
QualType ASTNodeImporter::VisitAutoType(const AutoType *T) {
// FIXME: Make sure that the "to" context supports C++11!
QualType FromDeduced = T->getDeducedType();
QualType ToDeduced;
if (!FromDeduced.isNull()) {
ToDeduced = Importer.Import(FromDeduced);
if (ToDeduced.isNull())
return QualType();
}
return Importer.getToContext().getAutoType(ToDeduced, T->isDecltypeAuto(),
/*IsDependent*/false);
}
QualType ASTNodeImporter::VisitRecordType(const RecordType *T) {
RecordDecl *ToDecl
= dyn_cast_or_null<RecordDecl>(Importer.Import(T->getDecl()));
if (!ToDecl)
return QualType();
return Importer.getToContext().getTagDeclType(ToDecl);
}
QualType ASTNodeImporter::VisitEnumType(const EnumType *T) {
EnumDecl *ToDecl
= dyn_cast_or_null<EnumDecl>(Importer.Import(T->getDecl()));
if (!ToDecl)
return QualType();
return Importer.getToContext().getTagDeclType(ToDecl);
}
QualType ASTNodeImporter::VisitAttributedType(const AttributedType *T) {
QualType FromModifiedType = T->getModifiedType();
QualType FromEquivalentType = T->getEquivalentType();
QualType ToModifiedType;
QualType ToEquivalentType;
if (!FromModifiedType.isNull()) {
ToModifiedType = Importer.Import(FromModifiedType);
if (ToModifiedType.isNull())
return QualType();
}
if (!FromEquivalentType.isNull()) {
ToEquivalentType = Importer.Import(FromEquivalentType);
if (ToEquivalentType.isNull())
return QualType();
}
return Importer.getToContext().getAttributedType(T->getAttrKind(),
ToModifiedType, ToEquivalentType);
}
QualType ASTNodeImporter::VisitTemplateSpecializationType(
const TemplateSpecializationType *T) {
TemplateName ToTemplate = Importer.Import(T->getTemplateName());
if (ToTemplate.isNull())
return QualType();
SmallVector<TemplateArgument, 2> ToTemplateArgs;
if (ImportTemplateArguments(T->getArgs(), T->getNumArgs(), ToTemplateArgs))
return QualType();
QualType ToCanonType;
if (!QualType(T, 0).isCanonical()) {
QualType FromCanonType
= Importer.getFromContext().getCanonicalType(QualType(T, 0));
    ToCanonType = Importer.Import(FromCanonType);
if (ToCanonType.isNull())
return QualType();
}
return Importer.getToContext().getTemplateSpecializationType(ToTemplate,
ToTemplateArgs.data(),
ToTemplateArgs.size(),
ToCanonType);
}
QualType ASTNodeImporter::VisitElaboratedType(const ElaboratedType *T) {
NestedNameSpecifier *ToQualifier = nullptr;
// Note: the qualifier in an ElaboratedType is optional.
if (T->getQualifier()) {
ToQualifier = Importer.Import(T->getQualifier());
if (!ToQualifier)
return QualType();
}
QualType ToNamedType = Importer.Import(T->getNamedType());
if (ToNamedType.isNull())
return QualType();
return Importer.getToContext().getElaboratedType(T->getKeyword(),
ToQualifier, ToNamedType);
}
QualType ASTNodeImporter::VisitObjCInterfaceType(const ObjCInterfaceType *T) {
ObjCInterfaceDecl *Class
= dyn_cast_or_null<ObjCInterfaceDecl>(Importer.Import(T->getDecl()));
if (!Class)
return QualType();
return Importer.getToContext().getObjCInterfaceType(Class);
}
QualType ASTNodeImporter::VisitObjCObjectType(const ObjCObjectType *T) {
QualType ToBaseType = Importer.Import(T->getBaseType());
if (ToBaseType.isNull())
return QualType();
SmallVector<QualType, 4> TypeArgs;
for (auto TypeArg : T->getTypeArgsAsWritten()) {
QualType ImportedTypeArg = Importer.Import(TypeArg);
if (ImportedTypeArg.isNull())
return QualType();
TypeArgs.push_back(ImportedTypeArg);
}
SmallVector<ObjCProtocolDecl *, 4> Protocols;
for (auto *P : T->quals()) {
ObjCProtocolDecl *Protocol
= dyn_cast_or_null<ObjCProtocolDecl>(Importer.Import(P));
if (!Protocol)
return QualType();
Protocols.push_back(Protocol);
}
return Importer.getToContext().getObjCObjectType(ToBaseType, TypeArgs,
Protocols,
T->isKindOfTypeAsWritten());
}
QualType
ASTNodeImporter::VisitObjCObjectPointerType(const ObjCObjectPointerType *T) {
QualType ToPointeeType = Importer.Import(T->getPointeeType());
if (ToPointeeType.isNull())
return QualType();
return Importer.getToContext().getObjCObjectPointerType(ToPointeeType);
}
//----------------------------------------------------------------------------
// Import Declarations
//----------------------------------------------------------------------------
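/// \brief Import the parts common to all named declarations: the declaration
/// context, lexical context, name, and location, and look up any
/// already-imported equivalent declaration.
///
/// \returns true if any of the required pieces failed to import.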
bool ASTNodeImporter::ImportDeclParts(NamedDecl *D, DeclContext *&DC,
DeclContext *&LexicalDC,
DeclarationName &Name,
NamedDecl *&ToD,
SourceLocation &Loc) {
// Import the context of this declaration.
DC = Importer.ImportContext(D->getDeclContext());
if (!DC)
return true;
LexicalDC = DC;
if (D->getDeclContext() != D->getLexicalDeclContext()) {
LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
if (!LexicalDC)
return true;
}
// Import the name of this declaration.
Name = Importer.Import(D->getDeclName());
if (D->getDeclName() && !Name)
return true;
// Import the location of this declaration.
Loc = Importer.Import(D->getLocation());
ToD = cast_or_null<NamedDecl>(Importer.GetAlreadyImportedOrNull(D));
return false;
}
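/// \brief If the "from" declaration is a defined record or enum, make sure
/// the corresponding "to" declaration gets a definition as well, importing
/// \p FromD first when \p ToD has not been created yet.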
void ASTNodeImporter::ImportDefinitionIfNeeded(Decl *FromD, Decl *ToD) {
if (!FromD)
return;
if (!ToD) {
ToD = Importer.Import(FromD);
if (!ToD)
return;
}
if (RecordDecl *FromRecord = dyn_cast<RecordDecl>(FromD)) {
if (RecordDecl *ToRecord = cast_or_null<RecordDecl>(ToD)) {
if (FromRecord->getDefinition() && FromRecord->isCompleteDefinition() && !ToRecord->getDefinition()) {
ImportDefinition(FromRecord, ToRecord);
}
}
return;
}
if (EnumDecl *FromEnum = dyn_cast<EnumDecl>(FromD)) {
if (EnumDecl *ToEnum = cast_or_null<EnumDecl>(ToD)) {
if (FromEnum->getDefinition() && !ToEnum->getDefinition()) {
ImportDefinition(FromEnum, ToEnum);
}
}
return;
}
}
void
ASTNodeImporter::ImportDeclarationNameLoc(const DeclarationNameInfo &From,
DeclarationNameInfo& To) {
// NOTE: To.Name and To.Loc are already imported.
// We only have to import To.LocInfo.
switch (To.getName().getNameKind()) {
case DeclarationName::Identifier:
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCMultiArgSelector:
case DeclarationName::CXXUsingDirective:
return;
case DeclarationName::CXXOperatorName: {
SourceRange Range = From.getCXXOperatorNameRange();
To.setCXXOperatorNameRange(Importer.Import(Range));
return;
}
case DeclarationName::CXXLiteralOperatorName: {
SourceLocation Loc = From.getCXXLiteralOperatorNameLoc();
To.setCXXLiteralOperatorNameLoc(Importer.Import(Loc));
return;
}
case DeclarationName::CXXConstructorName:
case DeclarationName::CXXDestructorName:
case DeclarationName::CXXConversionFunctionName: {
TypeSourceInfo *FromTInfo = From.getNamedTypeInfo();
To.setNamedTypeInfo(Importer.Import(FromTInfo));
return;
}
}
llvm_unreachable("Unknown name kind.");
}
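/// \brief Import every declaration lexically nested in \p FromDC. Under a
/// minimal import, only the context itself is imported unless \p ForceImport
/// is set.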
void ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
if (Importer.isMinimalImport() && !ForceImport) {
Importer.ImportContext(FromDC);
return;
}
for (auto *From : FromDC->decls())
Importer.Import(From);
}
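/// \brief Import the definition of a record, copying the cached
/// definition-data bits and base specifiers for C++ classes before importing
/// the members.
///
/// \returns true on failure.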
bool ASTNodeImporter::ImportDefinition(RecordDecl *From, RecordDecl *To,
ImportDefinitionKind Kind) {
if (To->getDefinition() || To->isBeingDefined()) {
if (Kind == IDK_Everything)
ImportDeclContext(From, /*ForceImport=*/true);
return false;
}
To->startDefinition();
// Add base classes.
if (CXXRecordDecl *ToCXX = dyn_cast<CXXRecordDecl>(To)) {
CXXRecordDecl *FromCXX = cast<CXXRecordDecl>(From);
struct CXXRecordDecl::DefinitionData &ToData = ToCXX->data();
struct CXXRecordDecl::DefinitionData &FromData = FromCXX->data();
ToData.UserDeclaredConstructor = FromData.UserDeclaredConstructor;
ToData.UserDeclaredSpecialMembers = FromData.UserDeclaredSpecialMembers;
ToData.Aggregate = FromData.Aggregate;
ToData.PlainOldData = FromData.PlainOldData;
ToData.Empty = FromData.Empty;
ToData.Polymorphic = FromData.Polymorphic;
ToData.Abstract = FromData.Abstract;
ToData.IsStandardLayout = FromData.IsStandardLayout;
ToData.HasNoNonEmptyBases = FromData.HasNoNonEmptyBases;
ToData.HasPrivateFields = FromData.HasPrivateFields;
ToData.HasProtectedFields = FromData.HasProtectedFields;
ToData.HasPublicFields = FromData.HasPublicFields;
ToData.HasMutableFields = FromData.HasMutableFields;
ToData.HasVariantMembers = FromData.HasVariantMembers;
ToData.HasOnlyCMembers = FromData.HasOnlyCMembers;
ToData.HasInClassInitializer = FromData.HasInClassInitializer;
ToData.HasUninitializedReferenceMember
= FromData.HasUninitializedReferenceMember;
ToData.NeedOverloadResolutionForMoveConstructor
= FromData.NeedOverloadResolutionForMoveConstructor;
ToData.NeedOverloadResolutionForMoveAssignment
= FromData.NeedOverloadResolutionForMoveAssignment;
ToData.NeedOverloadResolutionForDestructor
= FromData.NeedOverloadResolutionForDestructor;
ToData.DefaultedMoveConstructorIsDeleted
= FromData.DefaultedMoveConstructorIsDeleted;
ToData.DefaultedMoveAssignmentIsDeleted
= FromData.DefaultedMoveAssignmentIsDeleted;
ToData.DefaultedDestructorIsDeleted = FromData.DefaultedDestructorIsDeleted;
ToData.HasTrivialSpecialMembers = FromData.HasTrivialSpecialMembers;
ToData.HasIrrelevantDestructor = FromData.HasIrrelevantDestructor;
ToData.HasConstexprNonCopyMoveConstructor
= FromData.HasConstexprNonCopyMoveConstructor;
ToData.DefaultedDefaultConstructorIsConstexpr
= FromData.DefaultedDefaultConstructorIsConstexpr;
ToData.HasConstexprDefaultConstructor
= FromData.HasConstexprDefaultConstructor;
ToData.HasNonLiteralTypeFieldsOrBases
= FromData.HasNonLiteralTypeFieldsOrBases;
// ComputedVisibleConversions not imported.
ToData.UserProvidedDefaultConstructor
= FromData.UserProvidedDefaultConstructor;
ToData.DeclaredSpecialMembers = FromData.DeclaredSpecialMembers;
ToData.ImplicitCopyConstructorHasConstParam
= FromData.ImplicitCopyConstructorHasConstParam;
ToData.ImplicitCopyAssignmentHasConstParam
= FromData.ImplicitCopyAssignmentHasConstParam;
ToData.HasDeclaredCopyConstructorWithConstParam
= FromData.HasDeclaredCopyConstructorWithConstParam;
ToData.HasDeclaredCopyAssignmentWithConstParam
= FromData.HasDeclaredCopyAssignmentWithConstParam;
ToData.IsLambda = FromData.IsLambda;
SmallVector<CXXBaseSpecifier *, 4> Bases;
for (const auto &Base1 : FromCXX->bases()) {
QualType T = Importer.Import(Base1.getType());
if (T.isNull())
return true;
SourceLocation EllipsisLoc;
if (Base1.isPackExpansion())
EllipsisLoc = Importer.Import(Base1.getEllipsisLoc());
// Ensure that we have a definition for the base.
ImportDefinitionIfNeeded(Base1.getType()->getAsCXXRecordDecl());
Bases.push_back(
new (Importer.getToContext())
CXXBaseSpecifier(Importer.Import(Base1.getSourceRange()),
Base1.isVirtual(),
Base1.isBaseOfClass(),
Base1.getAccessSpecifierAsWritten(),
Importer.Import(Base1.getTypeSourceInfo()),
EllipsisLoc));
}
if (!Bases.empty())
ToCXX->setBases(Bases.data(), Bases.size());
}
if (shouldForceImportDeclContext(Kind))
ImportDeclContext(From, /*ForceImport=*/true);
To->completeDefinition();
return false;
}
bool ASTNodeImporter::ImportDefinition(VarDecl *From, VarDecl *To,
ImportDefinitionKind Kind) {
if (To->getAnyInitializer())
return false;
// FIXME: Can we really import any initializer? Alternatively, we could force
// ourselves to import every declaration of a variable and then only use
// getInit() here.
To->setInit(Importer.Import(const_cast<Expr *>(From->getAnyInitializer())));
// FIXME: Other bits to merge?
return false;
}
bool ASTNodeImporter::ImportDefinition(EnumDecl *From, EnumDecl *To,
ImportDefinitionKind Kind) {
if (To->getDefinition() || To->isBeingDefined()) {
if (Kind == IDK_Everything)
ImportDeclContext(From, /*ForceImport=*/true);
return false;
}
To->startDefinition();
QualType T = Importer.Import(Importer.getFromContext().getTypeDeclType(From));
if (T.isNull())
return true;
QualType ToPromotionType = Importer.Import(From->getPromotionType());
if (ToPromotionType.isNull())
return true;
if (shouldForceImportDeclContext(Kind))
ImportDeclContext(From, /*ForceImport=*/true);
// FIXME: we might need to merge the number of positive or negative bits
// if the enumerator lists don't match.
To->completeDefinition(T, ToPromotionType,
From->getNumPositiveBits(),
From->getNumNegativeBits());
return false;
}
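// Import a template parameter list; failure to import any single parameter
// aborts the whole list and returns null.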
TemplateParameterList *ASTNodeImporter::ImportTemplateParameterList(
TemplateParameterList *Params) {
SmallVector<NamedDecl *, 4> ToParams;
ToParams.reserve(Params->size());
for (TemplateParameterList::iterator P = Params->begin(),
PEnd = Params->end();
P != PEnd; ++P) {
Decl *To = Importer.Import(*P);
if (!To)
return nullptr;
ToParams.push_back(cast<NamedDecl>(To));
}
return TemplateParameterList::Create(Importer.getToContext(),
Importer.Import(Params->getTemplateLoc()),
Importer.Import(Params->getLAngleLoc()),
ToParams.data(), ToParams.size(),
Importer.Import(Params->getRAngleLoc()));
}
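// Import a single template argument, dispatching on its kind. A
// default-constructed (null) TemplateArgument signals failure.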
TemplateArgument
ASTNodeImporter::ImportTemplateArgument(const TemplateArgument &From) {
switch (From.getKind()) {
case TemplateArgument::Null:
return TemplateArgument();
case TemplateArgument::Type: {
QualType ToType = Importer.Import(From.getAsType());
if (ToType.isNull())
return TemplateArgument();
return TemplateArgument(ToType);
}
case TemplateArgument::Integral: {
QualType ToType = Importer.Import(From.getIntegralType());
if (ToType.isNull())
return TemplateArgument();
return TemplateArgument(From, ToType);
}
case TemplateArgument::Declaration: {
ValueDecl *To = cast_or_null<ValueDecl>(Importer.Import(From.getAsDecl()));
QualType ToType = Importer.Import(From.getParamTypeForDecl());
if (!To || ToType.isNull())
return TemplateArgument();
return TemplateArgument(To, ToType);
}
case TemplateArgument::NullPtr: {
QualType ToType = Importer.Import(From.getNullPtrType());
if (ToType.isNull())
return TemplateArgument();
return TemplateArgument(ToType, /*isNullPtr*/true);
}
case TemplateArgument::Template: {
TemplateName ToTemplate = Importer.Import(From.getAsTemplate());
if (ToTemplate.isNull())
return TemplateArgument();
return TemplateArgument(ToTemplate);
}
case TemplateArgument::TemplateExpansion: {
TemplateName ToTemplate
= Importer.Import(From.getAsTemplateOrTemplatePattern());
if (ToTemplate.isNull())
return TemplateArgument();
return TemplateArgument(ToTemplate, From.getNumTemplateExpansions());
}
case TemplateArgument::Expression:
if (Expr *ToExpr = Importer.Import(From.getAsExpr()))
return TemplateArgument(ToExpr);
return TemplateArgument();
case TemplateArgument::Pack: {
SmallVector<TemplateArgument, 2> ToPack;
ToPack.reserve(From.pack_size());
if (ImportTemplateArguments(From.pack_begin(), From.pack_size(), ToPack))
return TemplateArgument();
TemplateArgument *ToArgs
= new (Importer.getToContext()) TemplateArgument[ToPack.size()];
std::copy(ToPack.begin(), ToPack.end(), ToArgs);
return TemplateArgument(ToArgs, ToPack.size());
}
}
llvm_unreachable("Invalid template argument kind");
}
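// Import a sequence of template arguments; returns true on error, matching
// the convention used by the ImportDefinition overloads above.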
bool ASTNodeImporter::ImportTemplateArguments(const TemplateArgument *FromArgs,
unsigned NumFromArgs,
SmallVectorImpl<TemplateArgument> &ToArgs) {
for (unsigned I = 0; I != NumFromArgs; ++I) {
TemplateArgument To = ImportTemplateArgument(FromArgs[I]);
if (To.isNull() && !FromArgs[I].isNull())
return true;
ToArgs.push_back(To);
}
return false;
}
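// The IsStructuralMatch overloads below delegate to
// StructuralEquivalenceContext, which performs a deep comparison of the two
// declarations and remembers pairs already known to be non-equivalent.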
bool ASTNodeImporter::IsStructuralMatch(RecordDecl *FromRecord,
RecordDecl *ToRecord, bool Complain) {
// Eliminate a potential failure point where we attempt to re-import
// something we're trying to import while completing ToRecord.
Decl *ToOrigin = Importer.GetOriginalDecl(ToRecord);
if (ToOrigin) {
RecordDecl *ToOriginRecord = dyn_cast<RecordDecl>(ToOrigin);
if (ToOriginRecord)
ToRecord = ToOriginRecord;
}
StructuralEquivalenceContext Ctx(Importer.getFromContext(),
ToRecord->getASTContext(),
Importer.getNonEquivalentDecls(),
false, Complain);
return Ctx.IsStructurallyEquivalent(FromRecord, ToRecord);
}
bool ASTNodeImporter::IsStructuralMatch(VarDecl *FromVar, VarDecl *ToVar,
bool Complain) {
StructuralEquivalenceContext Ctx(
Importer.getFromContext(), Importer.getToContext(),
Importer.getNonEquivalentDecls(), false, Complain);
return Ctx.IsStructurallyEquivalent(FromVar, ToVar);
}
bool ASTNodeImporter::IsStructuralMatch(EnumDecl *FromEnum, EnumDecl *ToEnum) {
StructuralEquivalenceContext Ctx(Importer.getFromContext(),
Importer.getToContext(),
Importer.getNonEquivalentDecls());
return Ctx.IsStructurallyEquivalent(FromEnum, ToEnum);
}
bool ASTNodeImporter::IsStructuralMatch(EnumConstantDecl *FromEC,
EnumConstantDecl *ToEC)
{
const llvm::APSInt &FromVal = FromEC->getInitVal();
const llvm::APSInt &ToVal = ToEC->getInitVal();
return FromVal.isSigned() == ToVal.isSigned() &&
FromVal.getBitWidth() == ToVal.getBitWidth() &&
FromVal == ToVal;
}
bool ASTNodeImporter::IsStructuralMatch(ClassTemplateDecl *From,
ClassTemplateDecl *To) {
StructuralEquivalenceContext Ctx(Importer.getFromContext(),
Importer.getToContext(),
Importer.getNonEquivalentDecls());
return Ctx.IsStructurallyEquivalent(From, To);
}
bool ASTNodeImporter::IsStructuralMatch(VarTemplateDecl *From,
VarTemplateDecl *To) {
StructuralEquivalenceContext Ctx(Importer.getFromContext(),
Importer.getToContext(),
Importer.getNonEquivalentDecls());
return Ctx.IsStructurallyEquivalent(From, To);
}
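// Fallback for declaration kinds without a dedicated visitor: report the
// node as unsupported rather than importing it incorrectly.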
Decl *ASTNodeImporter::VisitDecl(Decl *D) {
Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node)
<< D->getDeclKindName();
return nullptr;
}
Decl *ASTNodeImporter::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
TranslationUnitDecl *ToD =
Importer.getToContext().getTranslationUnitDecl();
Importer.Imported(D, ToD);
return ToD;
}
Decl *ASTNodeImporter::VisitNamespaceDecl(NamespaceDecl *D) {
// Import the major distinguishing characteristics of this namespace.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
return nullptr;
if (ToD)
return ToD;
NamespaceDecl *MergeWithNamespace = nullptr;
if (!Name) {
// This is an anonymous namespace. Adopt an existing anonymous
// namespace if we can.
// FIXME: Not testable.
if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(DC))
MergeWithNamespace = TU->getAnonymousNamespace();
else
MergeWithNamespace = cast<NamespaceDecl>(DC)->getAnonymousNamespace();
} else {
SmallVector<NamedDecl *, 4> ConflictingDecls;
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_Namespace))
continue;
if (NamespaceDecl *FoundNS = dyn_cast<NamespaceDecl>(FoundDecls[I])) {
MergeWithNamespace = FoundNS;
ConflictingDecls.clear();
break;
}
ConflictingDecls.push_back(FoundDecls[I]);
}
if (!ConflictingDecls.empty()) {
Name = Importer.HandleNameConflict(Name, DC, Decl::IDNS_Namespace,
ConflictingDecls.data(),
ConflictingDecls.size());
}
}
// Create the "to" namespace, if needed.
NamespaceDecl *ToNamespace = MergeWithNamespace;
if (!ToNamespace) {
ToNamespace = NamespaceDecl::Create(Importer.getToContext(), DC,
D->isInline(),
Importer.Import(D->getLocStart()),
Loc, Name.getAsIdentifierInfo(),
/*PrevDecl=*/nullptr);
ToNamespace->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToNamespace);
// If this is an anonymous namespace, register it as the anonymous
// namespace within its context.
if (!Name) {
if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(DC))
TU->setAnonymousNamespace(ToNamespace);
else
cast<NamespaceDecl>(DC)->setAnonymousNamespace(ToNamespace);
}
}
Importer.Imported(D, ToNamespace);
ImportDeclContext(D);
return ToNamespace;
}
Decl *ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
// Import the major distinguishing characteristics of this typedef.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
return nullptr;
if (ToD)
return ToD;
// If this typedef is not in block scope, determine whether we've
// seen a typedef with the same name (that we can merge with) or any
// other entity by that name (which name lookup could conflict with).
if (!DC->isFunctionOrMethod()) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
unsigned IDNS = Decl::IDNS_Ordinary;
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
continue;
if (TypedefNameDecl *FoundTypedef =
dyn_cast<TypedefNameDecl>(FoundDecls[I])) {
if (Importer.IsStructurallyEquivalent(D->getUnderlyingType(),
FoundTypedef->getUnderlyingType()))
return Importer.Imported(D, FoundTypedef);
}
ConflictingDecls.push_back(FoundDecls[I]);
}
if (!ConflictingDecls.empty()) {
Name = Importer.HandleNameConflict(Name, DC, IDNS,
ConflictingDecls.data(),
ConflictingDecls.size());
if (!Name)
return nullptr;
}
}
  // Import the underlying type of this typedef.
QualType T = Importer.Import(D->getUnderlyingType());
if (T.isNull())
return nullptr;
// Create the new typedef node.
TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
SourceLocation StartL = Importer.Import(D->getLocStart());
TypedefNameDecl *ToTypedef;
if (IsAlias)
ToTypedef = TypeAliasDecl::Create(Importer.getToContext(), DC,
StartL, Loc,
Name.getAsIdentifierInfo(),
TInfo);
else
ToTypedef = TypedefDecl::Create(Importer.getToContext(), DC,
StartL, Loc,
Name.getAsIdentifierInfo(),
TInfo);
ToTypedef->setAccess(D->getAccess());
ToTypedef->setLexicalDeclContext(LexicalDC);
Importer.Imported(D, ToTypedef);
LexicalDC->addDeclInternal(ToTypedef);
return ToTypedef;
}
Decl *ASTNodeImporter::VisitTypedefDecl(TypedefDecl *D) {
return VisitTypedefNameDecl(D, /*IsAlias=*/false);
}
Decl *ASTNodeImporter::VisitTypeAliasDecl(TypeAliasDecl *D) {
return VisitTypedefNameDecl(D, /*IsAlias=*/true);
}
Decl *ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
// Import the major distinguishing characteristics of this enum.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
return nullptr;
if (ToD)
return ToD;
// Figure out what enum name we're looking for.
unsigned IDNS = Decl::IDNS_Tag;
DeclarationName SearchName = Name;
if (!SearchName && D->getTypedefNameForAnonDecl()) {
SearchName = Importer.Import(D->getTypedefNameForAnonDecl()->getDeclName());
IDNS = Decl::IDNS_Ordinary;
} else if (Importer.getToContext().getLangOpts().CPlusPlus)
IDNS |= Decl::IDNS_Ordinary;
// We may already have an enum of the same name; try to find and match it.
if (!DC->isFunctionOrMethod() && SearchName) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
SmallVector<NamedDecl *, 2> FoundDecls;
    DC->getRedeclContext()->localUncachedLookup(SearchName, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
continue;
Decl *Found = FoundDecls[I];
if (TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Found)) {
if (const TagType *Tag = Typedef->getUnderlyingType()->getAs<TagType>())
Found = Tag->getDecl();
}
if (EnumDecl *FoundEnum = dyn_cast<EnumDecl>(Found)) {
if (IsStructuralMatch(D, FoundEnum))
return Importer.Imported(D, FoundEnum);
}
ConflictingDecls.push_back(FoundDecls[I]);
}
if (!ConflictingDecls.empty()) {
Name = Importer.HandleNameConflict(Name, DC, IDNS,
ConflictingDecls.data(),
ConflictingDecls.size());
}
}
// Create the enum declaration.
EnumDecl *D2 = EnumDecl::Create(Importer.getToContext(), DC,
Importer.Import(D->getLocStart()),
Loc, Name.getAsIdentifierInfo(), nullptr,
D->isScoped(), D->isScopedUsingClassTag(),
D->isFixed());
// Import the qualifier, if any.
D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
D2->setAccess(D->getAccess());
D2->setLexicalDeclContext(LexicalDC);
Importer.Imported(D, D2);
LexicalDC->addDeclInternal(D2);
// Import the integer type.
QualType ToIntegerType = Importer.Import(D->getIntegerType());
if (ToIntegerType.isNull())
return nullptr;
D2->setIntegerType(ToIntegerType);
  // Import the definition.
if (D->isCompleteDefinition() && ImportDefinition(D, D2))
return nullptr;
return D2;
}
Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
// If this record has a definition in the translation unit we're coming from,
// but this particular declaration is not that definition, import the
// definition and map to that.
TagDecl *Definition = D->getDefinition();
if (Definition && Definition != D) {
Decl *ImportedDef = Importer.Import(Definition);
if (!ImportedDef)
return nullptr;
return Importer.Imported(D, ImportedDef);
}
// Import the major distinguishing characteristics of this record.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
return nullptr;
if (ToD)
return ToD;
// Figure out what structure name we're looking for.
unsigned IDNS = Decl::IDNS_Tag;
DeclarationName SearchName = Name;
if (!SearchName && D->getTypedefNameForAnonDecl()) {
SearchName = Importer.Import(D->getTypedefNameForAnonDecl()->getDeclName());
IDNS = Decl::IDNS_Ordinary;
} else if (Importer.getToContext().getLangOpts().CPlusPlus)
IDNS |= Decl::IDNS_Ordinary;
// We may already have a record of the same name; try to find and match it.
RecordDecl *AdoptDecl = nullptr;
if (!DC->isFunctionOrMethod()) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
SmallVector<NamedDecl *, 2> FoundDecls;
    DC->getRedeclContext()->localUncachedLookup(SearchName, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
continue;
Decl *Found = FoundDecls[I];
if (TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Found)) {
if (const TagType *Tag = Typedef->getUnderlyingType()->getAs<TagType>())
Found = Tag->getDecl();
}
if (RecordDecl *FoundRecord = dyn_cast<RecordDecl>(Found)) {
if (D->isAnonymousStructOrUnion() &&
FoundRecord->isAnonymousStructOrUnion()) {
// If both anonymous structs/unions are in a record context, make sure
// they occur in the same location in the context records.
if (Optional<unsigned> Index1
= findAnonymousStructOrUnionIndex(D)) {
if (Optional<unsigned> Index2 =
findAnonymousStructOrUnionIndex(FoundRecord)) {
if (*Index1 != *Index2)
continue;
}
}
}
if (RecordDecl *FoundDef = FoundRecord->getDefinition()) {
if ((SearchName && !D->isCompleteDefinition())
|| (D->isCompleteDefinition() &&
D->isAnonymousStructOrUnion()
== FoundDef->isAnonymousStructOrUnion() &&
IsStructuralMatch(D, FoundDef))) {
          // The record types structurally match, or the "from" translation
          // unit only had a forward declaration anyway; treat them as the
          // same type.
// FIXME: For C++, we should also merge methods here.
return Importer.Imported(D, FoundDef);
}
} else if (!D->isCompleteDefinition()) {
// We have a forward declaration of this type, so adopt that forward
// declaration rather than building a new one.
// If one or both can be completed from external storage then try one
// last time to complete and compare them before doing this.
if (FoundRecord->hasExternalLexicalStorage() &&
!FoundRecord->isCompleteDefinition())
FoundRecord->getASTContext().getExternalSource()->CompleteType(FoundRecord);
if (D->hasExternalLexicalStorage())
D->getASTContext().getExternalSource()->CompleteType(D);
if (FoundRecord->isCompleteDefinition() &&
D->isCompleteDefinition() &&
!IsStructuralMatch(D, FoundRecord))
continue;
AdoptDecl = FoundRecord;
continue;
} else if (!SearchName) {
continue;
}
}
ConflictingDecls.push_back(FoundDecls[I]);
}
if (!ConflictingDecls.empty() && SearchName) {
Name = Importer.HandleNameConflict(Name, DC, IDNS,
ConflictingDecls.data(),
ConflictingDecls.size());
}
}
// Create the record declaration.
RecordDecl *D2 = AdoptDecl;
SourceLocation StartLoc = Importer.Import(D->getLocStart());
if (!D2) {
if (isa<CXXRecordDecl>(D)) {
CXXRecordDecl *D2CXX = CXXRecordDecl::Create(Importer.getToContext(),
D->getTagKind(),
DC, StartLoc, Loc,
Name.getAsIdentifierInfo());
D2 = D2CXX;
D2->setAccess(D->getAccess());
} else {
D2 = RecordDecl::Create(Importer.getToContext(), D->getTagKind(),
DC, StartLoc, Loc, Name.getAsIdentifierInfo());
}
D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
D2->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(D2);
if (D->isAnonymousStructOrUnion())
D2->setAnonymousStructOrUnion(true);
}
Importer.Imported(D, D2);
if (D->isCompleteDefinition() && ImportDefinition(D, D2, IDK_Default))
return nullptr;
return D2;
}
Decl *ASTNodeImporter::VisitEnumConstantDecl(EnumConstantDecl *D) {
// Import the major distinguishing characteristics of this enumerator.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
return nullptr;
if (ToD)
return ToD;
QualType T = Importer.Import(D->getType());
if (T.isNull())
return nullptr;
// Determine whether there are any other declarations with the same name and
// in the same context.
if (!LexicalDC->isFunctionOrMethod()) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
unsigned IDNS = Decl::IDNS_Ordinary;
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
continue;
if (EnumConstantDecl *FoundEnumConstant
= dyn_cast<EnumConstantDecl>(FoundDecls[I])) {
if (IsStructuralMatch(D, FoundEnumConstant))
return Importer.Imported(D, FoundEnumConstant);
}
ConflictingDecls.push_back(FoundDecls[I]);
}
if (!ConflictingDecls.empty()) {
Name = Importer.HandleNameConflict(Name, DC, IDNS,
ConflictingDecls.data(),
ConflictingDecls.size());
if (!Name)
return nullptr;
}
}
Expr *Init = Importer.Import(D->getInitExpr());
if (D->getInitExpr() && !Init)
return nullptr;
EnumConstantDecl *ToEnumerator
= EnumConstantDecl::Create(Importer.getToContext(), cast<EnumDecl>(DC), Loc,
Name.getAsIdentifierInfo(), T,
Init, D->getInitVal());
ToEnumerator->setAccess(D->getAccess());
ToEnumerator->setLexicalDeclContext(LexicalDC);
Importer.Imported(D, ToEnumerator);
LexicalDC->addDeclInternal(ToEnumerator);
return ToEnumerator;
}
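// Import a function, merging with an existing declaration of the same name
// when the types are structurally equivalent; otherwise create a fresh
// FunctionDecl (or the appropriate C++ special-member subclass).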
Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
// Import the major distinguishing characteristics of this function.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
return nullptr;
if (ToD)
return ToD;
// Try to find a function in our own ("to") context with the same name, same
// type, and in the same context as the function we're importing.
if (!LexicalDC->isFunctionOrMethod()) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
unsigned IDNS = Decl::IDNS_Ordinary;
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
continue;
if (FunctionDecl *FoundFunction = dyn_cast<FunctionDecl>(FoundDecls[I])) {
if (FoundFunction->hasExternalFormalLinkage() &&
D->hasExternalFormalLinkage()) {
if (Importer.IsStructurallyEquivalent(D->getType(),
FoundFunction->getType())) {
// FIXME: Actually try to merge the body and other attributes.
return Importer.Imported(D, FoundFunction);
}
// FIXME: Check for overloading more carefully, e.g., by boosting
// Sema::IsOverload out to the AST library.
// Function overloading is okay in C++.
if (Importer.getToContext().getLangOpts().CPlusPlus)
continue;
// Complain about inconsistent function types.
Importer.ToDiag(Loc, diag::err_odr_function_type_inconsistent)
<< Name << D->getType() << FoundFunction->getType();
Importer.ToDiag(FoundFunction->getLocation(),
diag::note_odr_value_here)
<< FoundFunction->getType();
}
}
ConflictingDecls.push_back(FoundDecls[I]);
}
if (!ConflictingDecls.empty()) {
Name = Importer.HandleNameConflict(Name, DC, IDNS,
ConflictingDecls.data(),
ConflictingDecls.size());
if (!Name)
return nullptr;
}
}
DeclarationNameInfo NameInfo(Name, Loc);
// Import additional name location/type info.
ImportDeclarationNameLoc(D->getNameInfo(), NameInfo);
QualType FromTy = D->getType();
bool usedDifferentExceptionSpec = false;
if (const FunctionProtoType *
FromFPT = D->getType()->getAs<FunctionProtoType>()) {
FunctionProtoType::ExtProtoInfo FromEPI = FromFPT->getExtProtoInfo();
// FunctionProtoType::ExtProtoInfo's ExceptionSpecDecl can point to the
// FunctionDecl that we are importing the FunctionProtoType for.
// To avoid an infinite recursion when importing, create the FunctionDecl
// with a simplified function type and update it afterwards.
if (FromEPI.ExceptionSpec.SourceDecl ||
FromEPI.ExceptionSpec.SourceTemplate ||
FromEPI.ExceptionSpec.NoexceptExpr) {
FunctionProtoType::ExtProtoInfo DefaultEPI;
FromTy = Importer.getFromContext().getFunctionType(
FromFPT->getReturnType(), FromFPT->getParamTypes(), DefaultEPI, None); // HLSL Change - add param modifiers
usedDifferentExceptionSpec = true;
}
}
// Import the type.
QualType T = Importer.Import(FromTy);
if (T.isNull())
return nullptr;
// Import the function parameters.
SmallVector<ParmVarDecl *, 8> Parameters;
for (auto P : D->params()) {
ParmVarDecl *ToP = cast_or_null<ParmVarDecl>(Importer.Import(P));
if (!ToP)
return nullptr;
Parameters.push_back(ToP);
}
// Create the imported function.
TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
FunctionDecl *ToFunction = nullptr;
SourceLocation InnerLocStart = Importer.Import(D->getInnerLocStart());
if (CXXConstructorDecl *FromConstructor = dyn_cast<CXXConstructorDecl>(D)) {
ToFunction = CXXConstructorDecl::Create(Importer.getToContext(),
cast<CXXRecordDecl>(DC),
InnerLocStart,
NameInfo, T, TInfo,
FromConstructor->isExplicit(),
D->isInlineSpecified(),
D->isImplicit(),
D->isConstexpr());
} else if (isa<CXXDestructorDecl>(D)) {
ToFunction = CXXDestructorDecl::Create(Importer.getToContext(),
cast<CXXRecordDecl>(DC),
InnerLocStart,
NameInfo, T, TInfo,
D->isInlineSpecified(),
D->isImplicit());
} else if (CXXConversionDecl *FromConversion
= dyn_cast<CXXConversionDecl>(D)) {
ToFunction = CXXConversionDecl::Create(Importer.getToContext(),
cast<CXXRecordDecl>(DC),
InnerLocStart,
NameInfo, T, TInfo,
D->isInlineSpecified(),
FromConversion->isExplicit(),
D->isConstexpr(),
Importer.Import(D->getLocEnd()));
} else if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
ToFunction = CXXMethodDecl::Create(Importer.getToContext(),
cast<CXXRecordDecl>(DC),
InnerLocStart,
NameInfo, T, TInfo,
Method->getStorageClass(),
Method->isInlineSpecified(),
D->isConstexpr(),
Importer.Import(D->getLocEnd()));
} else {
ToFunction = FunctionDecl::Create(Importer.getToContext(), DC,
InnerLocStart,
NameInfo, T, TInfo, D->getStorageClass(),
D->isInlineSpecified(),
D->hasWrittenPrototype(),
D->isConstexpr());
}
// Import the qualifier, if any.
ToFunction->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
ToFunction->setAccess(D->getAccess());
ToFunction->setLexicalDeclContext(LexicalDC);
ToFunction->setVirtualAsWritten(D->isVirtualAsWritten());
ToFunction->setTrivial(D->isTrivial());
ToFunction->setPure(D->isPure());
Importer.Imported(D, ToFunction);
// Set the parameters.
for (unsigned I = 0, N = Parameters.size(); I != N; ++I) {
Parameters[I]->setOwningFunction(ToFunction);
ToFunction->addDeclInternal(Parameters[I]);
}
ToFunction->setParams(Parameters);
if (usedDifferentExceptionSpec) {
// Update FunctionProtoType::ExtProtoInfo.
QualType T = Importer.Import(D->getType());
if (T.isNull())
return nullptr;
ToFunction->setType(T);
}
// Import the body, if any.
if (Stmt *FromBody = D->getBody()) {
if (Stmt *ToBody = Importer.Import(FromBody)) {
ToFunction->setBody(ToBody);
}
}
// FIXME: Other bits to merge?
// Add this function to the lexical context.
LexicalDC->addDeclInternal(ToFunction);
return ToFunction;
}
Decl *ASTNodeImporter::VisitCXXMethodDecl(CXXMethodDecl *D) {
return VisitFunctionDecl(D);
}
Decl *ASTNodeImporter::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
return VisitCXXMethodDecl(D);
}
Decl *ASTNodeImporter::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
return VisitCXXMethodDecl(D);
}
Decl *ASTNodeImporter::VisitCXXConversionDecl(CXXConversionDecl *D) {
return VisitCXXMethodDecl(D);
}
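// Return the 1-based index of F among the fields of its owning record, or 0
// if the owner is not a record. Anonymous fields carry no name, so they are
// matched up by position instead.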
static unsigned getFieldIndex(Decl *F) {
RecordDecl *Owner = dyn_cast<RecordDecl>(F->getDeclContext());
if (!Owner)
return 0;
unsigned Index = 1;
for (const auto *D : Owner->noload_decls()) {
if (D == F)
return Index;
if (isa<FieldDecl>(*D) || isa<IndirectFieldDecl>(*D))
++Index;
}
return Index;
}
Decl *ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
// Import the major distinguishing characteristics of a variable.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
return nullptr;
if (ToD)
return ToD;
// Determine whether we've already imported this field.
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (FieldDecl *FoundField = dyn_cast<FieldDecl>(FoundDecls[I])) {
// For anonymous fields, match up by index.
if (!Name && getFieldIndex(D) != getFieldIndex(FoundField))
continue;
if (Importer.IsStructurallyEquivalent(D->getType(),
FoundField->getType())) {
Importer.Imported(D, FoundField);
return FoundField;
}
Importer.ToDiag(Loc, diag::err_odr_field_type_inconsistent)
<< Name << D->getType() << FoundField->getType();
Importer.ToDiag(FoundField->getLocation(), diag::note_odr_value_here)
<< FoundField->getType();
return nullptr;
}
}
// Import the type.
QualType T = Importer.Import(D->getType());
if (T.isNull())
return nullptr;
TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
Expr *BitWidth = Importer.Import(D->getBitWidth());
if (!BitWidth && D->getBitWidth())
return nullptr;
FieldDecl *ToField = FieldDecl::Create(Importer.getToContext(), DC,
Importer.Import(D->getInnerLocStart()),
Loc, Name.getAsIdentifierInfo(),
T, TInfo, BitWidth, D->isMutable(),
D->getInClassInitStyle());
ToField->setAccess(D->getAccess());
ToField->setLexicalDeclContext(LexicalDC);
  if (ToField->hasInClassInitializer())
    // Import the initializer rather than reusing the "from" AST's expression.
    ToField->setInClassInitializer(
        Importer.Import(D->getInClassInitializer()));
ToField->setImplicit(D->isImplicit());
Importer.Imported(D, ToField);
LexicalDC->addDeclInternal(ToField);
return ToField;
}
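// Indirect fields (members of anonymous structs/unions) additionally carry a
// chain of declarations leading to the underlying field, which must be
// imported alongside the field itself.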
Decl *ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
// Import the major distinguishing characteristics of a variable.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
return nullptr;
if (ToD)
return ToD;
// Determine whether we've already imported this field.
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (IndirectFieldDecl *FoundField
= dyn_cast<IndirectFieldDecl>(FoundDecls[I])) {
// For anonymous indirect fields, match up by index.
if (!Name && getFieldIndex(D) != getFieldIndex(FoundField))
continue;
if (Importer.IsStructurallyEquivalent(D->getType(),
FoundField->getType(),
!Name.isEmpty())) {
Importer.Imported(D, FoundField);
return FoundField;
}
// If there are more anonymous fields to check, continue.
if (!Name && I < N-1)
continue;
Importer.ToDiag(Loc, diag::err_odr_field_type_inconsistent)
<< Name << D->getType() << FoundField->getType();
Importer.ToDiag(FoundField->getLocation(), diag::note_odr_value_here)
<< FoundField->getType();
return nullptr;
}
}
// Import the type.
QualType T = Importer.Import(D->getType());
if (T.isNull())
return nullptr;
NamedDecl **NamedChain =
new (Importer.getToContext())NamedDecl*[D->getChainingSize()];
unsigned i = 0;
  for (auto *PI : D->chain()) {
    // Use a distinct name so the imported chain element does not shadow the
    // indirect field D being imported.
    Decl *Chained = Importer.Import(PI);
    if (!Chained)
      return nullptr;
    NamedChain[i++] = cast<NamedDecl>(Chained);
}
IndirectFieldDecl *ToIndirectField = IndirectFieldDecl::Create(
Importer.getToContext(), DC, Loc, Name.getAsIdentifierInfo(), T,
NamedChain, D->getChainingSize());
for (const auto *Attr : D->attrs())
ToIndirectField->addAttr(Attr->clone(Importer.getToContext()));
ToIndirectField->setAccess(D->getAccess());
ToIndirectField->setLexicalDeclContext(LexicalDC);
Importer.Imported(D, ToIndirectField);
LexicalDC->addDeclInternal(ToIndirectField);
return ToIndirectField;
}
Decl *ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
// Import the major distinguishing characteristics of an ivar.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
return nullptr;
if (ToD)
return ToD;
  // Determine whether we've already imported this ivar.
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (ObjCIvarDecl *FoundIvar = dyn_cast<ObjCIvarDecl>(FoundDecls[I])) {
if (Importer.IsStructurallyEquivalent(D->getType(),
FoundIvar->getType())) {
Importer.Imported(D, FoundIvar);
return FoundIvar;
}
Importer.ToDiag(Loc, diag::err_odr_ivar_type_inconsistent)
<< Name << D->getType() << FoundIvar->getType();
Importer.ToDiag(FoundIvar->getLocation(), diag::note_odr_value_here)
<< FoundIvar->getType();
return nullptr;
}
}
// Import the type.
QualType T = Importer.Import(D->getType());
if (T.isNull())
return nullptr;
TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
Expr *BitWidth = Importer.Import(D->getBitWidth());
if (!BitWidth && D->getBitWidth())
return nullptr;
ObjCIvarDecl *ToIvar = ObjCIvarDecl::Create(Importer.getToContext(),
cast<ObjCContainerDecl>(DC),
Importer.Import(D->getInnerLocStart()),
Loc, Name.getAsIdentifierInfo(),
T, TInfo, D->getAccessControl(),
BitWidth, D->getSynthesize());
ToIvar->setLexicalDeclContext(LexicalDC);
Importer.Imported(D, ToIvar);
LexicalDC->addDeclInternal(ToIvar);
return ToIvar;
}
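// Import a variable. File-scope variables with external linkage may merge
// with an existing declaration, including completing an incomplete array
// type using the bound seen in the other translation unit.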
Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
// Import the major distinguishing characteristics of a variable.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
return nullptr;
if (ToD)
return ToD;
// Try to find a variable in our own ("to") context with the same name and
// in the same context as the variable we're importing.
if (D->isFileVarDecl()) {
VarDecl *MergeWithVar = nullptr;
SmallVector<NamedDecl *, 4> ConflictingDecls;
unsigned IDNS = Decl::IDNS_Ordinary;
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
continue;
if (VarDecl *FoundVar = dyn_cast<VarDecl>(FoundDecls[I])) {
// We have found a variable that we may need to merge with. Check it.
if (FoundVar->hasExternalFormalLinkage() &&
D->hasExternalFormalLinkage()) {
if (Importer.IsStructurallyEquivalent(D->getType(),
FoundVar->getType())) {
MergeWithVar = FoundVar;
break;
}
const ArrayType *FoundArray
= Importer.getToContext().getAsArrayType(FoundVar->getType());
const ArrayType *TArray
= Importer.getToContext().getAsArrayType(D->getType());
if (FoundArray && TArray) {
if (isa<IncompleteArrayType>(FoundArray) &&
isa<ConstantArrayType>(TArray)) {
// Import the type.
QualType T = Importer.Import(D->getType());
if (T.isNull())
return nullptr;
FoundVar->setType(T);
MergeWithVar = FoundVar;
break;
} else if (isa<IncompleteArrayType>(TArray) &&
isa<ConstantArrayType>(FoundArray)) {
MergeWithVar = FoundVar;
break;
}
}
Importer.ToDiag(Loc, diag::err_odr_variable_type_inconsistent)
<< Name << D->getType() << FoundVar->getType();
Importer.ToDiag(FoundVar->getLocation(), diag::note_odr_value_here)
<< FoundVar->getType();
}
}
ConflictingDecls.push_back(FoundDecls[I]);
}
if (MergeWithVar) {
// An equivalent variable with external linkage has been found. Link
// the two declarations, then merge them.
Importer.Imported(D, MergeWithVar);
if (VarDecl *DDef = D->getDefinition()) {
if (VarDecl *ExistingDef = MergeWithVar->getDefinition()) {
Importer.ToDiag(ExistingDef->getLocation(),
diag::err_odr_variable_multiple_def)
<< Name;
Importer.FromDiag(DDef->getLocation(), diag::note_odr_defined_here);
} else {
Expr *Init = Importer.Import(DDef->getInit());
MergeWithVar->setInit(Init);
if (DDef->isInitKnownICE()) {
EvaluatedStmt *Eval = MergeWithVar->ensureEvaluatedStmt();
Eval->CheckedICE = true;
Eval->IsICE = DDef->isInitICE();
}
}
}
return MergeWithVar;
}
if (!ConflictingDecls.empty()) {
Name = Importer.HandleNameConflict(Name, DC, IDNS,
ConflictingDecls.data(),
ConflictingDecls.size());
if (!Name)
return nullptr;
}
}
// Import the type.
QualType T = Importer.Import(D->getType());
if (T.isNull())
return nullptr;
// Create the imported variable.
TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
VarDecl *ToVar = VarDecl::Create(Importer.getToContext(), DC,
Importer.Import(D->getInnerLocStart()),
Loc, Name.getAsIdentifierInfo(),
T, TInfo,
D->getStorageClass());
ToVar->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
ToVar->setAccess(D->getAccess());
ToVar->setLexicalDeclContext(LexicalDC);
Importer.Imported(D, ToVar);
LexicalDC->addDeclInternal(ToVar);
if (!D->isFileVarDecl() &&
D->isUsed())
ToVar->setIsUsed();
// Merge the initializer.
if (ImportDefinition(D, ToVar))
return nullptr;
return ToVar;
}
Decl *ASTNodeImporter::VisitImplicitParamDecl(ImplicitParamDecl *D) {
// Parameters are created in the translation unit's context, then moved
// into the function declaration's context afterward.
DeclContext *DC = Importer.getToContext().getTranslationUnitDecl();
// Import the name of this declaration.
DeclarationName Name = Importer.Import(D->getDeclName());
if (D->getDeclName() && !Name)
return nullptr;
// Import the location of this declaration.
SourceLocation Loc = Importer.Import(D->getLocation());
// Import the parameter's type.
QualType T = Importer.Import(D->getType());
if (T.isNull())
return nullptr;
// Create the imported parameter.
ImplicitParamDecl *ToParm
= ImplicitParamDecl::Create(Importer.getToContext(), DC,
Loc, Name.getAsIdentifierInfo(),
T);
return Importer.Imported(D, ToParm);
}
Decl *ASTNodeImporter::VisitParmVarDecl(ParmVarDecl *D) {
// Parameters are created in the translation unit's context, then moved
// into the function declaration's context afterward.
DeclContext *DC = Importer.getToContext().getTranslationUnitDecl();
// Import the name of this declaration.
DeclarationName Name = Importer.Import(D->getDeclName());
if (D->getDeclName() && !Name)
return nullptr;
// Import the location of this declaration.
SourceLocation Loc = Importer.Import(D->getLocation());
// Import the parameter's type.
QualType T = Importer.Import(D->getType());
if (T.isNull())
return nullptr;
// Create the imported parameter.
TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
ParmVarDecl *ToParm = ParmVarDecl::Create(Importer.getToContext(), DC,
Importer.Import(D->getInnerLocStart()),
Loc, Name.getAsIdentifierInfo(),
T, TInfo, D->getStorageClass(),
/*FIXME: Default argument*/nullptr);
ToParm->setHasInheritedDefaultArg(D->hasInheritedDefaultArg());
if (D->isUsed())
ToParm->setIsUsed();
return Importer.Imported(D, ToParm);
}
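// Import an Objective-C method, merging with an existing method only when
// the instance/class kind, return type, parameter types, arity, and
// variadicness all agree; any mismatch is diagnosed as an ODR violation.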
Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
// Import the major distinguishing characteristics of a method.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
return nullptr;
if (ToD)
return ToD;
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (ObjCMethodDecl *FoundMethod = dyn_cast<ObjCMethodDecl>(FoundDecls[I])) {
if (FoundMethod->isInstanceMethod() != D->isInstanceMethod())
continue;
// Check return types.
if (!Importer.IsStructurallyEquivalent(D->getReturnType(),
FoundMethod->getReturnType())) {
Importer.ToDiag(Loc, diag::err_odr_objc_method_result_type_inconsistent)
<< D->isInstanceMethod() << Name << D->getReturnType()
<< FoundMethod->getReturnType();
Importer.ToDiag(FoundMethod->getLocation(),
diag::note_odr_objc_method_here)
<< D->isInstanceMethod() << Name;
return nullptr;
}
// Check the number of parameters.
if (D->param_size() != FoundMethod->param_size()) {
Importer.ToDiag(Loc, diag::err_odr_objc_method_num_params_inconsistent)
<< D->isInstanceMethod() << Name
<< D->param_size() << FoundMethod->param_size();
Importer.ToDiag(FoundMethod->getLocation(),
diag::note_odr_objc_method_here)
<< D->isInstanceMethod() << Name;
return nullptr;
}
// Check parameter types.
for (ObjCMethodDecl::param_iterator P = D->param_begin(),
PEnd = D->param_end(), FoundP = FoundMethod->param_begin();
P != PEnd; ++P, ++FoundP) {
if (!Importer.IsStructurallyEquivalent((*P)->getType(),
(*FoundP)->getType())) {
Importer.FromDiag((*P)->getLocation(),
diag::err_odr_objc_method_param_type_inconsistent)
<< D->isInstanceMethod() << Name
<< (*P)->getType() << (*FoundP)->getType();
Importer.ToDiag((*FoundP)->getLocation(), diag::note_odr_value_here)
<< (*FoundP)->getType();
return nullptr;
}
}
      // Check variadic/non-variadic.
if (D->isVariadic() != FoundMethod->isVariadic()) {
Importer.ToDiag(Loc, diag::err_odr_objc_method_variadic_inconsistent)
<< D->isInstanceMethod() << Name;
Importer.ToDiag(FoundMethod->getLocation(),
diag::note_odr_objc_method_here)
<< D->isInstanceMethod() << Name;
return nullptr;
}
// FIXME: Any other bits we need to merge?
return Importer.Imported(D, FoundMethod);
}
}
// Import the result type.
QualType ResultTy = Importer.Import(D->getReturnType());
if (ResultTy.isNull())
return nullptr;
TypeSourceInfo *ReturnTInfo = Importer.Import(D->getReturnTypeSourceInfo());
ObjCMethodDecl *ToMethod = ObjCMethodDecl::Create(
Importer.getToContext(), Loc, Importer.Import(D->getLocEnd()),
Name.getObjCSelector(), ResultTy, ReturnTInfo, DC, D->isInstanceMethod(),
D->isVariadic(), D->isPropertyAccessor(), D->isImplicit(), D->isDefined(),
D->getImplementationControl(), D->hasRelatedResultType());
// FIXME: When we decide to merge method definitions, we'll need to
// deal with implicit parameters.
  // Import the parameters.
SmallVector<ParmVarDecl *, 5> ToParams;
for (auto *FromP : D->params()) {
ParmVarDecl *ToP = cast_or_null<ParmVarDecl>(Importer.Import(FromP));
if (!ToP)
return nullptr;
ToParams.push_back(ToP);
}
// Set the parameters.
for (unsigned I = 0, N = ToParams.size(); I != N; ++I) {
ToParams[I]->setOwningFunction(ToMethod);
ToMethod->addDeclInternal(ToParams[I]);
}
SmallVector<SourceLocation, 12> SelLocs;
D->getSelectorLocs(SelLocs);
ToMethod->setMethodParams(Importer.getToContext(), ToParams, SelLocs);
ToMethod->setLexicalDeclContext(LexicalDC);
Importer.Imported(D, ToMethod);
LexicalDC->addDeclInternal(ToMethod);
return ToMethod;
}
Decl *ASTNodeImporter::VisitObjCTypeParamDecl(ObjCTypeParamDecl *D) {
  // Import the major distinguishing characteristics of a type parameter.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
return nullptr;
if (ToD)
return ToD;
TypeSourceInfo *BoundInfo = Importer.Import(D->getTypeSourceInfo());
if (!BoundInfo)
return nullptr;
ObjCTypeParamDecl *Result = ObjCTypeParamDecl::Create(
Importer.getToContext(), DC,
D->getVariance(),
Importer.Import(D->getVarianceLoc()),
D->getIndex(),
Importer.Import(D->getLocation()),
Name.getAsIdentifierInfo(),
Importer.Import(D->getColonLoc()),
BoundInfo);
Importer.Imported(D, Result);
Result->setLexicalDeclContext(LexicalDC);
return Result;
}
Decl *ASTNodeImporter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
// Import the major distinguishing characteristics of a category.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
return nullptr;
if (ToD)
return ToD;
ObjCInterfaceDecl *ToInterface
= cast_or_null<ObjCInterfaceDecl>(Importer.Import(D->getClassInterface()));
if (!ToInterface)
return nullptr;
// Determine if we've already encountered this category.
ObjCCategoryDecl *MergeWithCategory
= ToInterface->FindCategoryDeclaration(Name.getAsIdentifierInfo());
ObjCCategoryDecl *ToCategory = MergeWithCategory;
if (!ToCategory) {
ToCategory = ObjCCategoryDecl::Create(Importer.getToContext(), DC,
Importer.Import(D->getAtStartLoc()),
Loc,
Importer.Import(D->getCategoryNameLoc()),
Name.getAsIdentifierInfo(),
ToInterface,
/*TypeParamList=*/nullptr,
Importer.Import(D->getIvarLBraceLoc()),
Importer.Import(D->getIvarRBraceLoc()));
ToCategory->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToCategory);
Importer.Imported(D, ToCategory);
// Import the type parameter list after calling Imported, to avoid
// loops when bringing in their DeclContext.
ToCategory->setTypeParamList(ImportObjCTypeParamList(
D->getTypeParamList()));
// Import protocols
SmallVector<ObjCProtocolDecl *, 4> Protocols;
SmallVector<SourceLocation, 4> ProtocolLocs;
ObjCCategoryDecl::protocol_loc_iterator FromProtoLoc
= D->protocol_loc_begin();
for (ObjCCategoryDecl::protocol_iterator FromProto = D->protocol_begin(),
FromProtoEnd = D->protocol_end();
FromProto != FromProtoEnd;
++FromProto, ++FromProtoLoc) {
ObjCProtocolDecl *ToProto
= cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
if (!ToProto)
return nullptr;
Protocols.push_back(ToProto);
ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
}
// FIXME: If we're merging, make sure that the protocol list is the same.
ToCategory->setProtocolList(Protocols.data(), Protocols.size(),
ProtocolLocs.data(), Importer.getToContext());
} else {
Importer.Imported(D, ToCategory);
}
// Import all of the members of this category.
ImportDeclContext(D);
// If we have an implementation, import it as well.
if (D->getImplementation()) {
ObjCCategoryImplDecl *Impl
= cast_or_null<ObjCCategoryImplDecl>(
Importer.Import(D->getImplementation()));
if (!Impl)
return nullptr;
ToCategory->setImplementation(Impl);
}
return ToCategory;
}
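// Import a protocol definition: start the definition, copy the list of
// inherited protocols, and optionally force-import the protocol's members.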
bool ASTNodeImporter::ImportDefinition(ObjCProtocolDecl *From,
ObjCProtocolDecl *To,
ImportDefinitionKind Kind) {
if (To->getDefinition()) {
if (shouldForceImportDeclContext(Kind))
ImportDeclContext(From);
return false;
}
  // Start the protocol definition.
To->startDefinition();
// Import protocols
SmallVector<ObjCProtocolDecl *, 4> Protocols;
SmallVector<SourceLocation, 4> ProtocolLocs;
ObjCProtocolDecl::protocol_loc_iterator
FromProtoLoc = From->protocol_loc_begin();
for (ObjCProtocolDecl::protocol_iterator FromProto = From->protocol_begin(),
FromProtoEnd = From->protocol_end();
FromProto != FromProtoEnd;
++FromProto, ++FromProtoLoc) {
ObjCProtocolDecl *ToProto
= cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
if (!ToProto)
return true;
Protocols.push_back(ToProto);
ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
}
// FIXME: If we're merging, make sure that the protocol list is the same.
To->setProtocolList(Protocols.data(), Protocols.size(),
ProtocolLocs.data(), Importer.getToContext());
if (shouldForceImportDeclContext(Kind)) {
// Import all of the members of this protocol.
ImportDeclContext(From, /*ForceImport=*/true);
}
return false;
}
Decl *ASTNodeImporter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
// If this protocol has a definition in the translation unit we're coming
// from, but this particular declaration is not that definition, import the
// definition and map to that.
ObjCProtocolDecl *Definition = D->getDefinition();
if (Definition && Definition != D) {
Decl *ImportedDef = Importer.Import(Definition);
if (!ImportedDef)
return nullptr;
return Importer.Imported(D, ImportedDef);
}
// Import the major distinguishing characteristics of a protocol.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
return nullptr;
if (ToD)
return ToD;
ObjCProtocolDecl *MergeWithProtocol = nullptr;
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_ObjCProtocol))
continue;
if ((MergeWithProtocol = dyn_cast<ObjCProtocolDecl>(FoundDecls[I])))
break;
}
ObjCProtocolDecl *ToProto = MergeWithProtocol;
if (!ToProto) {
ToProto = ObjCProtocolDecl::Create(Importer.getToContext(), DC,
Name.getAsIdentifierInfo(), Loc,
Importer.Import(D->getAtStartLoc()),
/*PrevDecl=*/nullptr);
ToProto->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToProto);
}
Importer.Imported(D, ToProto);
if (D->isThisDeclarationADefinition() && ImportDefinition(D, ToProto))
return nullptr;
return ToProto;
}
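// Linkage specifications have no name to merge on, so a new LinkageSpecDecl
// is always created in the "to" context.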
Decl *ASTNodeImporter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
DeclContext *DC = Importer.ImportContext(D->getDeclContext());
DeclContext *LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
SourceLocation ExternLoc = Importer.Import(D->getExternLoc());
SourceLocation LangLoc = Importer.Import(D->getLocation());
bool HasBraces = D->hasBraces();
LinkageSpecDecl *ToLinkageSpec =
LinkageSpecDecl::Create(Importer.getToContext(),
DC,
ExternLoc,
LangLoc,
D->getLanguage(),
HasBraces);
if (HasBraces) {
SourceLocation RBraceLoc = Importer.Import(D->getRBraceLoc());
ToLinkageSpec->setRBraceLoc(RBraceLoc);
}
ToLinkageSpec->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToLinkageSpec);
Importer.Imported(D, ToLinkageSpec);
return ToLinkageSpec;
}
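// Import an @interface definition. If the "to" interface is already defined,
// only check that the superclasses agree; otherwise copy over the
// superclass, protocol list, known categories, and @implementation.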
bool ASTNodeImporter::ImportDefinition(ObjCInterfaceDecl *From,
ObjCInterfaceDecl *To,
ImportDefinitionKind Kind) {
if (To->getDefinition()) {
// Check consistency of superclass.
ObjCInterfaceDecl *FromSuper = From->getSuperClass();
if (FromSuper) {
FromSuper = cast_or_null<ObjCInterfaceDecl>(Importer.Import(FromSuper));
if (!FromSuper)
return true;
}
ObjCInterfaceDecl *ToSuper = To->getSuperClass();
if ((bool)FromSuper != (bool)ToSuper ||
(FromSuper && !declaresSameEntity(FromSuper, ToSuper))) {
Importer.ToDiag(To->getLocation(),
diag::err_odr_objc_superclass_inconsistent)
<< To->getDeclName();
if (ToSuper)
Importer.ToDiag(To->getSuperClassLoc(), diag::note_odr_objc_superclass)
<< To->getSuperClass()->getDeclName();
else
Importer.ToDiag(To->getLocation(),
diag::note_odr_objc_missing_superclass);
if (From->getSuperClass())
Importer.FromDiag(From->getSuperClassLoc(),
diag::note_odr_objc_superclass)
<< From->getSuperClass()->getDeclName();
else
Importer.FromDiag(From->getLocation(),
diag::note_odr_objc_missing_superclass);
}
if (shouldForceImportDeclContext(Kind))
ImportDeclContext(From);
return false;
}
// Start the definition.
To->startDefinition();
// If this class has a superclass, import it.
if (From->getSuperClass()) {
TypeSourceInfo *SuperTInfo = Importer.Import(From->getSuperClassTInfo());
if (!SuperTInfo)
return true;
To->setSuperClass(SuperTInfo);
}
// Import protocols
SmallVector<ObjCProtocolDecl *, 4> Protocols;
SmallVector<SourceLocation, 4> ProtocolLocs;
ObjCInterfaceDecl::protocol_loc_iterator
FromProtoLoc = From->protocol_loc_begin();
for (ObjCInterfaceDecl::protocol_iterator FromProto = From->protocol_begin(),
FromProtoEnd = From->protocol_end();
FromProto != FromProtoEnd;
++FromProto, ++FromProtoLoc) {
ObjCProtocolDecl *ToProto
= cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
if (!ToProto)
return true;
Protocols.push_back(ToProto);
ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
}
// FIXME: If we're merging, make sure that the protocol list is the same.
To->setProtocolList(Protocols.data(), Protocols.size(),
ProtocolLocs.data(), Importer.getToContext());
// Import categories. When the categories themselves are imported, they'll
// hook themselves into this interface.
for (auto *Cat : From->known_categories())
Importer.Import(Cat);
// If we have an @implementation, import it as well.
if (From->getImplementation()) {
ObjCImplementationDecl *Impl = cast_or_null<ObjCImplementationDecl>(
Importer.Import(From->getImplementation()));
if (!Impl)
return true;
To->setImplementation(Impl);
}
if (shouldForceImportDeclContext(Kind)) {
// Import all of the members of this class.
ImportDeclContext(From, /*ForceImport=*/true);
}
return false;
}
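// Import an Objective-C type parameter list; a null list imports as null,
// and failure to import any parameter aborts the whole list.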
ObjCTypeParamList *
ASTNodeImporter::ImportObjCTypeParamList(ObjCTypeParamList *list) {
if (!list)
return nullptr;
SmallVector<ObjCTypeParamDecl *, 4> toTypeParams;
for (auto fromTypeParam : *list) {
auto toTypeParam = cast_or_null<ObjCTypeParamDecl>(
Importer.Import(fromTypeParam));
if (!toTypeParam)
return nullptr;
toTypeParams.push_back(toTypeParam);
}
return ObjCTypeParamList::create(Importer.getToContext(),
Importer.Import(list->getLAngleLoc()),
toTypeParams,
Importer.Import(list->getRAngleLoc()));
}
Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
// If this class has a definition in the translation unit we're coming from,
// but this particular declaration is not that definition, import the
// definition and map to that.
ObjCInterfaceDecl *Definition = D->getDefinition();
if (Definition && Definition != D) {
Decl *ImportedDef = Importer.Import(Definition);
if (!ImportedDef)
return nullptr;
return Importer.Imported(D, ImportedDef);
}
// Import the major distinguishing characteristics of an @interface.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
return nullptr;
if (ToD)
return ToD;
// Look for an existing interface with the same name.
ObjCInterfaceDecl *MergeWithIface = nullptr;
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_Ordinary))
continue;
if ((MergeWithIface = dyn_cast<ObjCInterfaceDecl>(FoundDecls[I])))
break;
}
// Create an interface declaration, if one does not already exist.
ObjCInterfaceDecl *ToIface = MergeWithIface;
if (!ToIface) {
ToIface = ObjCInterfaceDecl::Create(Importer.getToContext(), DC,
Importer.Import(D->getAtStartLoc()),
Name.getAsIdentifierInfo(),
/*TypeParamList=*/nullptr,
/*PrevDecl=*/nullptr, Loc,
D->isImplicitInterfaceDecl());
ToIface->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToIface);
}
Importer.Imported(D, ToIface);
// Import the type parameter list after calling Imported, to avoid
// loops when bringing in their DeclContext.
ToIface->setTypeParamList(ImportObjCTypeParamList(
D->getTypeParamListAsWritten()));
if (D->isThisDeclarationADefinition() && ImportDefinition(D, ToIface))
return nullptr;
return ToIface;
}
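// A category implementation attaches to its category's implementation slot;
// reuse the existing one if the category already has an implementation.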
Decl *ASTNodeImporter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) {
ObjCCategoryDecl *Category = cast_or_null<ObjCCategoryDecl>(
Importer.Import(D->getCategoryDecl()));
if (!Category)
return nullptr;
ObjCCategoryImplDecl *ToImpl = Category->getImplementation();
if (!ToImpl) {
DeclContext *DC = Importer.ImportContext(D->getDeclContext());
if (!DC)
return nullptr;
SourceLocation CategoryNameLoc = Importer.Import(D->getCategoryNameLoc());
ToImpl = ObjCCategoryImplDecl::Create(Importer.getToContext(), DC,
Importer.Import(D->getIdentifier()),
Category->getClassInterface(),
Importer.Import(D->getLocation()),
Importer.Import(D->getAtStartLoc()),
CategoryNameLoc);
DeclContext *LexicalDC = DC;
if (D->getDeclContext() != D->getLexicalDeclContext()) {
LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
if (!LexicalDC)
return nullptr;
ToImpl->setLexicalDeclContext(LexicalDC);
}
LexicalDC->addDeclInternal(ToImpl);
Category->setImplementation(ToImpl);
}
Importer.Imported(D, ToImpl);
ImportDeclContext(D);
return ToImpl;
}
Decl *ASTNodeImporter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
// Find the corresponding interface.
ObjCInterfaceDecl *Iface = cast_or_null<ObjCInterfaceDecl>(
Importer.Import(D->getClassInterface()));
if (!Iface)
return nullptr;
// Import the superclass, if any.
ObjCInterfaceDecl *Super = nullptr;
if (D->getSuperClass()) {
Super = cast_or_null<ObjCInterfaceDecl>(
Importer.Import(D->getSuperClass()));
if (!Super)
return nullptr;
}
ObjCImplementationDecl *Impl = Iface->getImplementation();
if (!Impl) {
// We haven't imported an implementation yet. Create a new @implementation
// now.
Impl = ObjCImplementationDecl::Create(Importer.getToContext(),
Importer.ImportContext(D->getDeclContext()),
Iface, Super,
Importer.Import(D->getLocation()),
Importer.Import(D->getAtStartLoc()),
Importer.Import(D->getSuperClassLoc()),
Importer.Import(D->getIvarLBraceLoc()),
Importer.Import(D->getIvarRBraceLoc()));
if (D->getDeclContext() != D->getLexicalDeclContext()) {
DeclContext *LexicalDC
= Importer.ImportContext(D->getLexicalDeclContext());
if (!LexicalDC)
return nullptr;
Impl->setLexicalDeclContext(LexicalDC);
}
// Associate the implementation with the class it implements.
Iface->setImplementation(Impl);
Importer.Imported(D, Iface->getImplementation());
} else {
Importer.Imported(D, Iface->getImplementation());
// Verify that the existing @implementation has the same superclass.
if ((Super && !Impl->getSuperClass()) ||
(!Super && Impl->getSuperClass()) ||
(Super && Impl->getSuperClass() &&
!declaresSameEntity(Super->getCanonicalDecl(),
Impl->getSuperClass()))) {
Importer.ToDiag(Impl->getLocation(),
diag::err_odr_objc_superclass_inconsistent)
<< Iface->getDeclName();
// FIXME: It would be nice to have the location of the superclass
// below.
if (Impl->getSuperClass())
Importer.ToDiag(Impl->getLocation(),
diag::note_odr_objc_superclass)
<< Impl->getSuperClass()->getDeclName();
else
Importer.ToDiag(Impl->getLocation(),
diag::note_odr_objc_missing_superclass);
if (D->getSuperClass())
Importer.FromDiag(D->getLocation(),
diag::note_odr_objc_superclass)
<< D->getSuperClass()->getDeclName();
else
Importer.FromDiag(D->getLocation(),
diag::note_odr_objc_missing_superclass);
return nullptr;
}
}
// Import all of the members of this @implementation.
ImportDeclContext(D);
return Impl;
}
Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
// Import the major distinguishing characteristics of an @property.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
return nullptr;
if (ToD)
return ToD;
// Check whether we have already imported this property.
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (ObjCPropertyDecl *FoundProp
= dyn_cast<ObjCPropertyDecl>(FoundDecls[I])) {
// Check property types.
if (!Importer.IsStructurallyEquivalent(D->getType(),
FoundProp->getType())) {
Importer.ToDiag(Loc, diag::err_odr_objc_property_type_inconsistent)
<< Name << D->getType() << FoundProp->getType();
Importer.ToDiag(FoundProp->getLocation(), diag::note_odr_value_here)
<< FoundProp->getType();
return nullptr;
}
// FIXME: Check property attributes, getters, setters, etc.?
// Consider these properties to be equivalent.
Importer.Imported(D, FoundProp);
return FoundProp;
}
}
// Import the type.
TypeSourceInfo *TSI = Importer.Import(D->getTypeSourceInfo());
if (!TSI)
return nullptr;
// Create the new property.
ObjCPropertyDecl *ToProperty
= ObjCPropertyDecl::Create(Importer.getToContext(), DC, Loc,
Name.getAsIdentifierInfo(),
Importer.Import(D->getAtLoc()),
Importer.Import(D->getLParenLoc()),
Importer.Import(D->getType()),
TSI,
D->getPropertyImplementation());
Importer.Imported(D, ToProperty);
ToProperty->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToProperty);
ToProperty->setPropertyAttributes(D->getPropertyAttributes());
ToProperty->setPropertyAttributesAsWritten(
D->getPropertyAttributesAsWritten());
ToProperty->setGetterName(Importer.Import(D->getGetterName()));
ToProperty->setSetterName(Importer.Import(D->getSetterName()));
ToProperty->setGetterMethodDecl(
cast_or_null<ObjCMethodDecl>(Importer.Import(D->getGetterMethodDecl())));
ToProperty->setSetterMethodDecl(
cast_or_null<ObjCMethodDecl>(Importer.Import(D->getSetterMethodDecl())));
ToProperty->setPropertyIvarDecl(
cast_or_null<ObjCIvarDecl>(Importer.Import(D->getPropertyIvarDecl())));
return ToProperty;
}
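// Property implementations merge with an existing one only when both the
// @synthesize/@dynamic kind and the backing ivar agree.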
Decl *ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
ObjCPropertyDecl *Property = cast_or_null<ObjCPropertyDecl>(
Importer.Import(D->getPropertyDecl()));
if (!Property)
return nullptr;
DeclContext *DC = Importer.ImportContext(D->getDeclContext());
if (!DC)
return nullptr;
// Import the lexical declaration context.
DeclContext *LexicalDC = DC;
if (D->getDeclContext() != D->getLexicalDeclContext()) {
LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
if (!LexicalDC)
return nullptr;
}
ObjCImplDecl *InImpl = dyn_cast<ObjCImplDecl>(LexicalDC);
if (!InImpl)
return nullptr;
// Import the ivar (for an @synthesize).
ObjCIvarDecl *Ivar = nullptr;
if (D->getPropertyIvarDecl()) {
Ivar = cast_or_null<ObjCIvarDecl>(
Importer.Import(D->getPropertyIvarDecl()));
if (!Ivar)
return nullptr;
}
ObjCPropertyImplDecl *ToImpl
= InImpl->FindPropertyImplDecl(Property->getIdentifier());
if (!ToImpl) {
ToImpl = ObjCPropertyImplDecl::Create(Importer.getToContext(), DC,
Importer.Import(D->getLocStart()),
Importer.Import(D->getLocation()),
Property,
D->getPropertyImplementation(),
Ivar,
Importer.Import(D->getPropertyIvarDeclLoc()));
ToImpl->setLexicalDeclContext(LexicalDC);
Importer.Imported(D, ToImpl);
LexicalDC->addDeclInternal(ToImpl);
} else {
// Check that we have the same kind of property implementation (@synthesize
// vs. @dynamic).
if (D->getPropertyImplementation() != ToImpl->getPropertyImplementation()) {
Importer.ToDiag(ToImpl->getLocation(),
diag::err_odr_objc_property_impl_kind_inconsistent)
<< Property->getDeclName()
<< (ToImpl->getPropertyImplementation()
== ObjCPropertyImplDecl::Dynamic);
Importer.FromDiag(D->getLocation(),
diag::note_odr_objc_property_impl_kind)
<< D->getPropertyDecl()->getDeclName()
<< (D->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic);
return nullptr;
}
    // For @synthesize, check that we have the same ivar.
if (D->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize &&
Ivar != ToImpl->getPropertyIvarDecl()) {
Importer.ToDiag(ToImpl->getPropertyIvarDeclLoc(),
diag::err_odr_objc_synthesize_ivar_inconsistent)
<< Property->getDeclName()
<< ToImpl->getPropertyIvarDecl()->getDeclName()
<< Ivar->getDeclName();
Importer.FromDiag(D->getPropertyIvarDeclLoc(),
diag::note_odr_objc_synthesize_ivar_here)
<< D->getPropertyIvarDecl()->getDeclName();
return nullptr;
}
// Merge the existing implementation with the new implementation.
Importer.Imported(D, ToImpl);
}
return ToImpl;
}
Decl *ASTNodeImporter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
  // For template parameters, we adopt the translation unit as our declaration
// context. This context will be fixed when the actual template declaration
// is created.
// FIXME: Import default argument.
return TemplateTypeParmDecl::Create(Importer.getToContext(),
Importer.getToContext().getTranslationUnitDecl(),
Importer.Import(D->getLocStart()),
Importer.Import(D->getLocation()),
D->getDepth(),
D->getIndex(),
Importer.Import(D->getIdentifier()),
D->wasDeclaredWithTypename(),
D->isParameterPack());
}
Decl *
ASTNodeImporter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
// Import the name of this declaration.
DeclarationName Name = Importer.Import(D->getDeclName());
if (D->getDeclName() && !Name)
return nullptr;
// Import the location of this declaration.
SourceLocation Loc = Importer.Import(D->getLocation());
// Import the type of this declaration.
QualType T = Importer.Import(D->getType());
if (T.isNull())
return nullptr;
// Import type-source information.
TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
if (D->getTypeSourceInfo() && !TInfo)
return nullptr;
// FIXME: Import default argument.
return NonTypeTemplateParmDecl::Create(Importer.getToContext(),
Importer.getToContext().getTranslationUnitDecl(),
Importer.Import(D->getInnerLocStart()),
Loc, D->getDepth(), D->getPosition(),
Name.getAsIdentifierInfo(),
T, D->isParameterPack(), TInfo);
}
Decl *
ASTNodeImporter::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
// Import the name of this declaration.
DeclarationName Name = Importer.Import(D->getDeclName());
if (D->getDeclName() && !Name)
return nullptr;
// Import the location of this declaration.
SourceLocation Loc = Importer.Import(D->getLocation());
// Import template parameters.
TemplateParameterList *TemplateParams
= ImportTemplateParameterList(D->getTemplateParameters());
if (!TemplateParams)
return nullptr;
// FIXME: Import default argument.
return TemplateTemplateParmDecl::Create(Importer.getToContext(),
Importer.getToContext().getTranslationUnitDecl(),
Loc, D->getDepth(), D->getPosition(),
D->isParameterPack(),
Name.getAsIdentifierInfo(),
TemplateParams);
}
Decl *ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
// If this record has a definition in the translation unit we're coming from,
// but this particular declaration is not that definition, import the
// definition and map to that.
CXXRecordDecl *Definition
= cast_or_null<CXXRecordDecl>(D->getTemplatedDecl()->getDefinition());
if (Definition && Definition != D->getTemplatedDecl()) {
Decl *ImportedDef
= Importer.Import(Definition->getDescribedClassTemplate());
if (!ImportedDef)
return nullptr;
return Importer.Imported(D, ImportedDef);
}
// Import the major distinguishing characteristics of this class template.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
return nullptr;
if (ToD)
return ToD;
// We may already have a template of the same name; try to find and match it.
if (!DC->isFunctionOrMethod()) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_Ordinary))
continue;
Decl *Found = FoundDecls[I];
if (ClassTemplateDecl *FoundTemplate
= dyn_cast<ClassTemplateDecl>(Found)) {
if (IsStructuralMatch(D, FoundTemplate)) {
// The class templates structurally match; call it the same template.
// FIXME: We may be filling in a forward declaration here. Handle
// this case!
Importer.Imported(D->getTemplatedDecl(),
FoundTemplate->getTemplatedDecl());
return Importer.Imported(D, FoundTemplate);
}
}
ConflictingDecls.push_back(FoundDecls[I]);
}
if (!ConflictingDecls.empty()) {
Name = Importer.HandleNameConflict(Name, DC, Decl::IDNS_Ordinary,
ConflictingDecls.data(),
ConflictingDecls.size());
}
if (!Name)
return nullptr;
}
CXXRecordDecl *DTemplated = D->getTemplatedDecl();
// Create the declaration that is being templated.
SourceLocation StartLoc = Importer.Import(DTemplated->getLocStart());
SourceLocation IdLoc = Importer.Import(DTemplated->getLocation());
CXXRecordDecl *D2Templated = CXXRecordDecl::Create(Importer.getToContext(),
DTemplated->getTagKind(),
DC, StartLoc, IdLoc,
Name.getAsIdentifierInfo());
D2Templated->setAccess(DTemplated->getAccess());
D2Templated->setQualifierInfo(Importer.Import(DTemplated->getQualifierLoc()));
D2Templated->setLexicalDeclContext(LexicalDC);
// Create the class template declaration itself.
TemplateParameterList *TemplateParams
= ImportTemplateParameterList(D->getTemplateParameters());
if (!TemplateParams)
return nullptr;
ClassTemplateDecl *D2 = ClassTemplateDecl::Create(Importer.getToContext(), DC,
Loc, Name, TemplateParams,
D2Templated,
/*PrevDecl=*/nullptr);
D2Templated->setDescribedClassTemplate(D2);
D2->setAccess(D->getAccess());
D2->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(D2);
// Note the relationship between the class templates.
Importer.Imported(D, D2);
Importer.Imported(DTemplated, D2Templated);
if (DTemplated->isCompleteDefinition() &&
!D2Templated->isCompleteDefinition()) {
// FIXME: Import definition!
}
return D2;
}
Decl *ASTNodeImporter::VisitClassTemplateSpecializationDecl(
ClassTemplateSpecializationDecl *D) {
// If this record has a definition in the translation unit we're coming from,
// but this particular declaration is not that definition, import the
// definition and map to that.
TagDecl *Definition = D->getDefinition();
if (Definition && Definition != D) {
Decl *ImportedDef = Importer.Import(Definition);
if (!ImportedDef)
return nullptr;
return Importer.Imported(D, ImportedDef);
}
ClassTemplateDecl *ClassTemplate
= cast_or_null<ClassTemplateDecl>(Importer.Import(
D->getSpecializedTemplate()));
if (!ClassTemplate)
return nullptr;
// Import the context of this declaration.
DeclContext *DC = ClassTemplate->getDeclContext();
if (!DC)
return nullptr;
DeclContext *LexicalDC = DC;
if (D->getDeclContext() != D->getLexicalDeclContext()) {
LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
if (!LexicalDC)
return nullptr;
}
// Import the location of this declaration.
SourceLocation StartLoc = Importer.Import(D->getLocStart());
SourceLocation IdLoc = Importer.Import(D->getLocation());
// Import template arguments.
SmallVector<TemplateArgument, 2> TemplateArgs;
if (ImportTemplateArguments(D->getTemplateArgs().data(),
D->getTemplateArgs().size(),
TemplateArgs))
return nullptr;
// Try to find an existing specialization with these template arguments.
void *InsertPos = nullptr;
ClassTemplateSpecializationDecl *D2
= ClassTemplate->findSpecialization(TemplateArgs, InsertPos);
if (D2) {
// We already have a class template specialization with these template
// arguments.
// FIXME: Check for specialization vs. instantiation errors.
if (RecordDecl *FoundDef = D2->getDefinition()) {
if (!D->isCompleteDefinition() || IsStructuralMatch(D, FoundDef)) {
// The record types structurally match, or the "from" translation
// unit only had a forward declaration anyway; call it the same
      // declaration.
return Importer.Imported(D, FoundDef);
}
}
} else {
// Create a new specialization.
D2 = ClassTemplateSpecializationDecl::Create(Importer.getToContext(),
D->getTagKind(), DC,
StartLoc, IdLoc,
ClassTemplate,
TemplateArgs.data(),
TemplateArgs.size(),
/*PrevDecl=*/nullptr);
D2->setSpecializationKind(D->getSpecializationKind());
// Add this specialization to the class template.
ClassTemplate->AddSpecialization(D2, InsertPos);
// Import the qualifier, if any.
D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
// Add the specialization to this context.
D2->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(D2);
}
Importer.Imported(D, D2);
if (D->isCompleteDefinition() && ImportDefinition(D, D2))
return nullptr;
return D2;
}
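// Note: findSpecialization() doubles as a folding-set lookup: on a miss it
// fills InsertPos with the bucket position for these template arguments, and
// AddSpecialization() reuses that position to insert the new node without a
// second lookup.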
Decl *ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
  // If this variable has a definition in the translation unit we're coming
  // from, but this particular declaration is not that definition, import the
  // definition and map to that.
VarDecl *Definition =
cast_or_null<VarDecl>(D->getTemplatedDecl()->getDefinition());
if (Definition && Definition != D->getTemplatedDecl()) {
Decl *ImportedDef = Importer.Import(Definition->getDescribedVarTemplate());
if (!ImportedDef)
return nullptr;
return Importer.Imported(D, ImportedDef);
}
// Import the major distinguishing characteristics of this variable template.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
return nullptr;
if (ToD)
return ToD;
// We may already have a template of the same name; try to find and match it.
assert(!DC->isFunctionOrMethod() &&
"Variable templates cannot be declared at function scope");
SmallVector<NamedDecl *, 4> ConflictingDecls;
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_Ordinary))
continue;
Decl *Found = FoundDecls[I];
if (VarTemplateDecl *FoundTemplate = dyn_cast<VarTemplateDecl>(Found)) {
if (IsStructuralMatch(D, FoundTemplate)) {
// The variable templates structurally match; call it the same template.
Importer.Imported(D->getTemplatedDecl(),
FoundTemplate->getTemplatedDecl());
return Importer.Imported(D, FoundTemplate);
}
}
ConflictingDecls.push_back(FoundDecls[I]);
}
if (!ConflictingDecls.empty()) {
Name = Importer.HandleNameConflict(Name, DC, Decl::IDNS_Ordinary,
ConflictingDecls.data(),
ConflictingDecls.size());
}
if (!Name)
return nullptr;
VarDecl *DTemplated = D->getTemplatedDecl();
// Import the type.
QualType T = Importer.Import(DTemplated->getType());
if (T.isNull())
return nullptr;
// Create the declaration that is being templated.
SourceLocation StartLoc = Importer.Import(DTemplated->getLocStart());
SourceLocation IdLoc = Importer.Import(DTemplated->getLocation());
TypeSourceInfo *TInfo = Importer.Import(DTemplated->getTypeSourceInfo());
VarDecl *D2Templated = VarDecl::Create(Importer.getToContext(), DC, StartLoc,
IdLoc, Name.getAsIdentifierInfo(), T,
TInfo, DTemplated->getStorageClass());
D2Templated->setAccess(DTemplated->getAccess());
D2Templated->setQualifierInfo(Importer.Import(DTemplated->getQualifierLoc()));
D2Templated->setLexicalDeclContext(LexicalDC);
// Importer.Imported(DTemplated, D2Templated);
// LexicalDC->addDeclInternal(D2Templated);
// Merge the initializer.
if (ImportDefinition(DTemplated, D2Templated))
return nullptr;
// Create the variable template declaration itself.
TemplateParameterList *TemplateParams =
ImportTemplateParameterList(D->getTemplateParameters());
if (!TemplateParams)
return nullptr;
VarTemplateDecl *D2 = VarTemplateDecl::Create(
Importer.getToContext(), DC, Loc, Name, TemplateParams, D2Templated);
D2Templated->setDescribedVarTemplate(D2);
D2->setAccess(D->getAccess());
D2->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(D2);
// Note the relationship between the variable templates.
Importer.Imported(D, D2);
Importer.Imported(DTemplated, D2Templated);
if (DTemplated->isThisDeclarationADefinition() &&
!D2Templated->isThisDeclarationADefinition()) {
// FIXME: Import definition!
}
return D2;
}
Decl *ASTNodeImporter::VisitVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *D) {
// If this record has a definition in the translation unit we're coming from,
// but this particular declaration is not that definition, import the
// definition and map to that.
VarDecl *Definition = D->getDefinition();
if (Definition && Definition != D) {
Decl *ImportedDef = Importer.Import(Definition);
if (!ImportedDef)
return nullptr;
return Importer.Imported(D, ImportedDef);
}
VarTemplateDecl *VarTemplate = cast_or_null<VarTemplateDecl>(
Importer.Import(D->getSpecializedTemplate()));
if (!VarTemplate)
return nullptr;
// Import the context of this declaration.
DeclContext *DC = VarTemplate->getDeclContext();
if (!DC)
return nullptr;
DeclContext *LexicalDC = DC;
if (D->getDeclContext() != D->getLexicalDeclContext()) {
LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
if (!LexicalDC)
return nullptr;
}
// Import the location of this declaration.
SourceLocation StartLoc = Importer.Import(D->getLocStart());
SourceLocation IdLoc = Importer.Import(D->getLocation());
// Import template arguments.
SmallVector<TemplateArgument, 2> TemplateArgs;
if (ImportTemplateArguments(D->getTemplateArgs().data(),
D->getTemplateArgs().size(), TemplateArgs))
return nullptr;
// Try to find an existing specialization with these template arguments.
void *InsertPos = nullptr;
VarTemplateSpecializationDecl *D2 = VarTemplate->findSpecialization(
TemplateArgs, InsertPos);
if (D2) {
// We already have a variable template specialization with these template
// arguments.
// FIXME: Check for specialization vs. instantiation errors.
if (VarDecl *FoundDef = D2->getDefinition()) {
if (!D->isThisDeclarationADefinition() ||
IsStructuralMatch(D, FoundDef)) {
// The record types structurally match, or the "from" translation
// unit only had a forward declaration anyway; call it the same
// variable.
return Importer.Imported(D, FoundDef);
}
}
} else {
// Import the type.
QualType T = Importer.Import(D->getType());
if (T.isNull())
return nullptr;
TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
// Create a new specialization.
D2 = VarTemplateSpecializationDecl::Create(
Importer.getToContext(), DC, StartLoc, IdLoc, VarTemplate, T, TInfo,
D->getStorageClass(), TemplateArgs.data(), TemplateArgs.size());
D2->setSpecializationKind(D->getSpecializationKind());
D2->setTemplateArgsInfo(D->getTemplateArgsInfo());
// Add this specialization to the class template.
VarTemplate->AddSpecialization(D2, InsertPos);
// Import the qualifier, if any.
D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
// Add the specialization to this context.
D2->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(D2);
}
Importer.Imported(D, D2);
if (D->isThisDeclarationADefinition() && ImportDefinition(D, D2))
return nullptr;
return D2;
}
//----------------------------------------------------------------------------
// Import Statements
//----------------------------------------------------------------------------
DeclGroupRef ASTNodeImporter::ImportDeclGroup(DeclGroupRef DG) {
if (DG.isNull())
return DeclGroupRef::Create(Importer.getToContext(), nullptr, 0);
size_t NumDecls = DG.end() - DG.begin();
SmallVector<Decl *, 1> ToDecls(NumDecls);
auto &_Importer = this->Importer;
std::transform(DG.begin(), DG.end(), ToDecls.begin(),
[&_Importer](Decl *D) -> Decl * {
return _Importer.Import(D);
});
return DeclGroupRef::Create(Importer.getToContext(),
ToDecls.begin(),
NumDecls);
}
Stmt *ASTNodeImporter::VisitStmt(Stmt *S) {
Importer.FromDiag(S->getLocStart(), diag::err_unsupported_ast_node)
<< S->getStmtClassName();
return nullptr;
}
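// The statement and expression importers below share an idiom: import each
// child node, treat a null result as failure only when the corresponding
// "from" child was itself non-null (many children are optional), and then
// rebuild the node in the "to" context.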
Stmt *ASTNodeImporter::VisitDeclStmt(DeclStmt *S) {
DeclGroupRef ToDG = ImportDeclGroup(S->getDeclGroup());
for (Decl *ToD : ToDG) {
if (!ToD)
return nullptr;
}
SourceLocation ToStartLoc = Importer.Import(S->getStartLoc());
SourceLocation ToEndLoc = Importer.Import(S->getEndLoc());
return new (Importer.getToContext()) DeclStmt(ToDG, ToStartLoc, ToEndLoc);
}
Stmt *ASTNodeImporter::VisitNullStmt(NullStmt *S) {
SourceLocation ToSemiLoc = Importer.Import(S->getSemiLoc());
return new (Importer.getToContext()) NullStmt(ToSemiLoc,
S->hasLeadingEmptyMacro());
}
Stmt *ASTNodeImporter::VisitCompoundStmt(CompoundStmt *S) {
SmallVector<Stmt *, 4> ToStmts(S->size());
auto &_Importer = this->Importer;
std::transform(S->body_begin(), S->body_end(), ToStmts.begin(),
[&_Importer](Stmt *CS) -> Stmt * {
return _Importer.Import(CS);
});
for (Stmt *ToS : ToStmts) {
if (!ToS)
return nullptr;
}
SourceLocation ToLBraceLoc = Importer.Import(S->getLBracLoc());
SourceLocation ToRBraceLoc = Importer.Import(S->getRBracLoc());
return new (Importer.getToContext()) CompoundStmt(Importer.getToContext(),
ToStmts,
ToLBraceLoc, ToRBraceLoc);
}
Stmt *ASTNodeImporter::VisitCaseStmt(CaseStmt *S) {
Expr *ToLHS = Importer.Import(S->getLHS());
if (!ToLHS)
return nullptr;
Expr *ToRHS = Importer.Import(S->getRHS());
if (!ToRHS && S->getRHS())
return nullptr;
SourceLocation ToCaseLoc = Importer.Import(S->getCaseLoc());
SourceLocation ToEllipsisLoc = Importer.Import(S->getEllipsisLoc());
SourceLocation ToColonLoc = Importer.Import(S->getColonLoc());
return new (Importer.getToContext()) CaseStmt(ToLHS, ToRHS,
ToCaseLoc, ToEllipsisLoc,
ToColonLoc);
}
Stmt *ASTNodeImporter::VisitDefaultStmt(DefaultStmt *S) {
SourceLocation ToDefaultLoc = Importer.Import(S->getDefaultLoc());
SourceLocation ToColonLoc = Importer.Import(S->getColonLoc());
Stmt *ToSubStmt = Importer.Import(S->getSubStmt());
if (!ToSubStmt && S->getSubStmt())
return nullptr;
return new (Importer.getToContext()) DefaultStmt(ToDefaultLoc, ToColonLoc,
ToSubStmt);
}
Stmt *ASTNodeImporter::VisitLabelStmt(LabelStmt *S) {
SourceLocation ToIdentLoc = Importer.Import(S->getIdentLoc());
LabelDecl *ToLabelDecl =
cast_or_null<LabelDecl>(Importer.Import(S->getDecl()));
if (!ToLabelDecl && S->getDecl())
return nullptr;
Stmt *ToSubStmt = Importer.Import(S->getSubStmt());
if (!ToSubStmt && S->getSubStmt())
return nullptr;
return new (Importer.getToContext()) LabelStmt(ToIdentLoc, ToLabelDecl,
ToSubStmt);
}
Stmt *ASTNodeImporter::VisitAttributedStmt(AttributedStmt *S) {
SourceLocation ToAttrLoc = Importer.Import(S->getAttrLoc());
ArrayRef<const Attr*> FromAttrs(S->getAttrs());
SmallVector<const Attr *, 1> ToAttrs(FromAttrs.size());
ASTContext &_ToContext = Importer.getToContext();
std::transform(FromAttrs.begin(), FromAttrs.end(), ToAttrs.begin(),
[&_ToContext](const Attr *A) -> const Attr * {
return A->clone(_ToContext);
});
for (const Attr *ToA : ToAttrs) {
if (!ToA)
return nullptr;
}
Stmt *ToSubStmt = Importer.Import(S->getSubStmt());
if (!ToSubStmt && S->getSubStmt())
return nullptr;
return AttributedStmt::Create(Importer.getToContext(), ToAttrLoc,
ToAttrs, ToSubStmt);
}
Stmt *ASTNodeImporter::VisitIfStmt(IfStmt *S) {
SourceLocation ToIfLoc = Importer.Import(S->getIfLoc());
VarDecl *ToConditionVariable = nullptr;
if (VarDecl *FromConditionVariable = S->getConditionVariable()) {
ToConditionVariable =
dyn_cast_or_null<VarDecl>(Importer.Import(FromConditionVariable));
if (!ToConditionVariable)
return nullptr;
}
Expr *ToCondition = Importer.Import(S->getCond());
if (!ToCondition && S->getCond())
return nullptr;
Stmt *ToThenStmt = Importer.Import(S->getThen());
if (!ToThenStmt && S->getThen())
return nullptr;
SourceLocation ToElseLoc = Importer.Import(S->getElseLoc());
Stmt *ToElseStmt = Importer.Import(S->getElse());
if (!ToElseStmt && S->getElse())
return nullptr;
return new (Importer.getToContext()) IfStmt(Importer.getToContext(),
ToIfLoc, ToConditionVariable,
ToCondition, ToThenStmt,
ToElseLoc, ToElseStmt);
}
Stmt *ASTNodeImporter::VisitSwitchStmt(SwitchStmt *S) {
VarDecl *ToConditionVariable = nullptr;
if (VarDecl *FromConditionVariable = S->getConditionVariable()) {
ToConditionVariable =
dyn_cast_or_null<VarDecl>(Importer.Import(FromConditionVariable));
if (!ToConditionVariable)
return nullptr;
}
Expr *ToCondition = Importer.Import(S->getCond());
if (!ToCondition && S->getCond())
return nullptr;
SwitchStmt *ToStmt = new (Importer.getToContext()) SwitchStmt(
Importer.getToContext(), ToConditionVariable,
ToCondition);
Stmt *ToBody = Importer.Import(S->getBody());
if (!ToBody && S->getBody())
return nullptr;
ToStmt->setBody(ToBody);
ToStmt->setSwitchLoc(Importer.Import(S->getSwitchLoc()));
// Now we have to re-chain the cases.
SwitchCase *LastChainedSwitchCase = nullptr;
for (SwitchCase *SC = S->getSwitchCaseList(); SC != nullptr;
SC = SC->getNextSwitchCase()) {
SwitchCase *ToSC = dyn_cast_or_null<SwitchCase>(Importer.Import(SC));
if (!ToSC)
return nullptr;
if (LastChainedSwitchCase)
LastChainedSwitchCase->setNextSwitchCase(ToSC);
else
ToStmt->setSwitchCaseList(ToSC);
LastChainedSwitchCase = ToSC;
}
return ToStmt;
}
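// Note: a SwitchStmt reaches its cases through a singly linked list threaded
// through the SwitchCase nodes rather than owning them directly, which is why
// VisitSwitchStmt re-chains the imported cases by hand above.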
Stmt *ASTNodeImporter::VisitWhileStmt(WhileStmt *S) {
VarDecl *ToConditionVariable = nullptr;
if (VarDecl *FromConditionVariable = S->getConditionVariable()) {
ToConditionVariable =
dyn_cast_or_null<VarDecl>(Importer.Import(FromConditionVariable));
if (!ToConditionVariable)
return nullptr;
}
Expr *ToCondition = Importer.Import(S->getCond());
if (!ToCondition && S->getCond())
return nullptr;
Stmt *ToBody = Importer.Import(S->getBody());
if (!ToBody && S->getBody())
return nullptr;
SourceLocation ToWhileLoc = Importer.Import(S->getWhileLoc());
return new (Importer.getToContext()) WhileStmt(Importer.getToContext(),
ToConditionVariable,
ToCondition, ToBody,
ToWhileLoc);
}
Stmt *ASTNodeImporter::VisitDoStmt(DoStmt *S) {
Stmt *ToBody = Importer.Import(S->getBody());
if (!ToBody && S->getBody())
return nullptr;
Expr *ToCondition = Importer.Import(S->getCond());
if (!ToCondition && S->getCond())
return nullptr;
SourceLocation ToDoLoc = Importer.Import(S->getDoLoc());
SourceLocation ToWhileLoc = Importer.Import(S->getWhileLoc());
SourceLocation ToRParenLoc = Importer.Import(S->getRParenLoc());
return new (Importer.getToContext()) DoStmt(ToBody, ToCondition,
ToDoLoc, ToWhileLoc,
ToRParenLoc);
}
Stmt *ASTNodeImporter::VisitForStmt(ForStmt *S) {
Stmt *ToInit = Importer.Import(S->getInit());
if (!ToInit && S->getInit())
return nullptr;
Expr *ToCondition = Importer.Import(S->getCond());
if (!ToCondition && S->getCond())
return nullptr;
VarDecl *ToConditionVariable = nullptr;
if (VarDecl *FromConditionVariable = S->getConditionVariable()) {
ToConditionVariable =
dyn_cast_or_null<VarDecl>(Importer.Import(FromConditionVariable));
if (!ToConditionVariable)
return nullptr;
}
Expr *ToInc = Importer.Import(S->getInc());
if (!ToInc && S->getInc())
return nullptr;
Stmt *ToBody = Importer.Import(S->getBody());
if (!ToBody && S->getBody())
return nullptr;
SourceLocation ToForLoc = Importer.Import(S->getForLoc());
SourceLocation ToLParenLoc = Importer.Import(S->getLParenLoc());
SourceLocation ToRParenLoc = Importer.Import(S->getRParenLoc());
return new (Importer.getToContext()) ForStmt(Importer.getToContext(),
ToInit, ToCondition,
ToConditionVariable,
ToInc, ToBody,
ToForLoc, ToLParenLoc,
ToRParenLoc);
}
Stmt *ASTNodeImporter::VisitGotoStmt(GotoStmt *S) {
LabelDecl *ToLabel = nullptr;
if (LabelDecl *FromLabel = S->getLabel()) {
ToLabel = dyn_cast_or_null<LabelDecl>(Importer.Import(FromLabel));
if (!ToLabel)
return nullptr;
}
SourceLocation ToGotoLoc = Importer.Import(S->getGotoLoc());
SourceLocation ToLabelLoc = Importer.Import(S->getLabelLoc());
return new (Importer.getToContext()) GotoStmt(ToLabel,
ToGotoLoc, ToLabelLoc);
}
Stmt *ASTNodeImporter::VisitIndirectGotoStmt(IndirectGotoStmt *S) {
SourceLocation ToGotoLoc = Importer.Import(S->getGotoLoc());
SourceLocation ToStarLoc = Importer.Import(S->getStarLoc());
Expr *ToTarget = Importer.Import(S->getTarget());
if (!ToTarget && S->getTarget())
return nullptr;
return new (Importer.getToContext()) IndirectGotoStmt(ToGotoLoc, ToStarLoc,
ToTarget);
}
Stmt *ASTNodeImporter::VisitContinueStmt(ContinueStmt *S) {
SourceLocation ToContinueLoc = Importer.Import(S->getContinueLoc());
return new (Importer.getToContext()) ContinueStmt(ToContinueLoc);
}
Stmt *ASTNodeImporter::VisitBreakStmt(BreakStmt *S) {
SourceLocation ToBreakLoc = Importer.Import(S->getBreakLoc());
return new (Importer.getToContext()) BreakStmt(ToBreakLoc);
}
Stmt *ASTNodeImporter::VisitReturnStmt(ReturnStmt *S) {
SourceLocation ToRetLoc = Importer.Import(S->getReturnLoc());
Expr *ToRetExpr = Importer.Import(S->getRetValue());
if (!ToRetExpr && S->getRetValue())
return nullptr;
VarDecl *NRVOCandidate = const_cast<VarDecl*>(S->getNRVOCandidate());
VarDecl *ToNRVOCandidate = cast_or_null<VarDecl>(Importer.Import(NRVOCandidate));
if (!ToNRVOCandidate && NRVOCandidate)
return nullptr;
return new (Importer.getToContext()) ReturnStmt(ToRetLoc, ToRetExpr,
ToNRVOCandidate);
}
Stmt *ASTNodeImporter::VisitCXXCatchStmt(CXXCatchStmt *S) {
SourceLocation ToCatchLoc = Importer.Import(S->getCatchLoc());
VarDecl *ToExceptionDecl = nullptr;
if (VarDecl *FromExceptionDecl = S->getExceptionDecl()) {
ToExceptionDecl =
dyn_cast_or_null<VarDecl>(Importer.Import(FromExceptionDecl));
if (!ToExceptionDecl)
return nullptr;
}
Stmt *ToHandlerBlock = Importer.Import(S->getHandlerBlock());
if (!ToHandlerBlock && S->getHandlerBlock())
return nullptr;
return new (Importer.getToContext()) CXXCatchStmt(ToCatchLoc,
ToExceptionDecl,
ToHandlerBlock);
}
Stmt *ASTNodeImporter::VisitCXXTryStmt(CXXTryStmt *S) {
SourceLocation ToTryLoc = Importer.Import(S->getTryLoc());
Stmt *ToTryBlock = Importer.Import(S->getTryBlock());
if (!ToTryBlock && S->getTryBlock())
return nullptr;
SmallVector<Stmt *, 1> ToHandlers(S->getNumHandlers());
for (unsigned HI = 0, HE = S->getNumHandlers(); HI != HE; ++HI) {
CXXCatchStmt *FromHandler = S->getHandler(HI);
if (Stmt *ToHandler = Importer.Import(FromHandler))
ToHandlers[HI] = ToHandler;
else
return nullptr;
}
return CXXTryStmt::Create(Importer.getToContext(), ToTryLoc, ToTryBlock,
ToHandlers);
}
Stmt *ASTNodeImporter::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
DeclStmt *ToRange =
dyn_cast_or_null<DeclStmt>(Importer.Import(S->getRangeStmt()));
if (!ToRange && S->getRangeStmt())
return nullptr;
DeclStmt *ToBeginEnd =
dyn_cast_or_null<DeclStmt>(Importer.Import(S->getBeginEndStmt()));
if (!ToBeginEnd && S->getBeginEndStmt())
return nullptr;
Expr *ToCond = Importer.Import(S->getCond());
if (!ToCond && S->getCond())
return nullptr;
Expr *ToInc = Importer.Import(S->getInc());
if (!ToInc && S->getInc())
return nullptr;
DeclStmt *ToLoopVar =
dyn_cast_or_null<DeclStmt>(Importer.Import(S->getLoopVarStmt()));
if (!ToLoopVar && S->getLoopVarStmt())
return nullptr;
Stmt *ToBody = Importer.Import(S->getBody());
if (!ToBody && S->getBody())
return nullptr;
SourceLocation ToForLoc = Importer.Import(S->getForLoc());
SourceLocation ToColonLoc = Importer.Import(S->getColonLoc());
SourceLocation ToRParenLoc = Importer.Import(S->getRParenLoc());
return new (Importer.getToContext()) CXXForRangeStmt(ToRange, ToBeginEnd,
ToCond, ToInc,
ToLoopVar, ToBody,
ToForLoc, ToColonLoc,
ToRParenLoc);
}
Stmt *ASTNodeImporter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
Stmt *ToElem = Importer.Import(S->getElement());
if (!ToElem && S->getElement())
return nullptr;
Expr *ToCollect = Importer.Import(S->getCollection());
if (!ToCollect && S->getCollection())
return nullptr;
Stmt *ToBody = Importer.Import(S->getBody());
if (!ToBody && S->getBody())
return nullptr;
SourceLocation ToForLoc = Importer.Import(S->getForLoc());
SourceLocation ToRParenLoc = Importer.Import(S->getRParenLoc());
return new (Importer.getToContext()) ObjCForCollectionStmt(ToElem,
ToCollect,
ToBody, ToForLoc,
ToRParenLoc);
}
Stmt *ASTNodeImporter::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
SourceLocation ToAtCatchLoc = Importer.Import(S->getAtCatchLoc());
SourceLocation ToRParenLoc = Importer.Import(S->getRParenLoc());
VarDecl *ToExceptionDecl = nullptr;
if (VarDecl *FromExceptionDecl = S->getCatchParamDecl()) {
ToExceptionDecl =
dyn_cast_or_null<VarDecl>(Importer.Import(FromExceptionDecl));
if (!ToExceptionDecl)
return nullptr;
}
Stmt *ToBody = Importer.Import(S->getCatchBody());
if (!ToBody && S->getCatchBody())
return nullptr;
return new (Importer.getToContext()) ObjCAtCatchStmt(ToAtCatchLoc,
ToRParenLoc,
ToExceptionDecl,
ToBody);
}
Stmt *ASTNodeImporter::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
SourceLocation ToAtFinallyLoc = Importer.Import(S->getAtFinallyLoc());
Stmt *ToAtFinallyStmt = Importer.Import(S->getFinallyBody());
if (!ToAtFinallyStmt && S->getFinallyBody())
return nullptr;
return new (Importer.getToContext()) ObjCAtFinallyStmt(ToAtFinallyLoc,
ToAtFinallyStmt);
}
Stmt *ASTNodeImporter::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
SourceLocation ToAtTryLoc = Importer.Import(S->getAtTryLoc());
Stmt *ToAtTryStmt = Importer.Import(S->getTryBody());
if (!ToAtTryStmt && S->getTryBody())
return nullptr;
SmallVector<Stmt *, 1> ToCatchStmts(S->getNumCatchStmts());
for (unsigned CI = 0, CE = S->getNumCatchStmts(); CI != CE; ++CI) {
ObjCAtCatchStmt *FromCatchStmt = S->getCatchStmt(CI);
if (Stmt *ToCatchStmt = Importer.Import(FromCatchStmt))
ToCatchStmts[CI] = ToCatchStmt;
else
return nullptr;
}
Stmt *ToAtFinallyStmt = Importer.Import(S->getFinallyStmt());
if (!ToAtFinallyStmt && S->getFinallyStmt())
return nullptr;
return ObjCAtTryStmt::Create(Importer.getToContext(),
ToAtTryLoc, ToAtTryStmt,
ToCatchStmts.begin(), ToCatchStmts.size(),
ToAtFinallyStmt);
}
Stmt *ASTNodeImporter::VisitObjCAtSynchronizedStmt
(ObjCAtSynchronizedStmt *S) {
SourceLocation ToAtSynchronizedLoc =
Importer.Import(S->getAtSynchronizedLoc());
Expr *ToSynchExpr = Importer.Import(S->getSynchExpr());
if (!ToSynchExpr && S->getSynchExpr())
return nullptr;
Stmt *ToSynchBody = Importer.Import(S->getSynchBody());
if (!ToSynchBody && S->getSynchBody())
return nullptr;
return new (Importer.getToContext()) ObjCAtSynchronizedStmt(
ToAtSynchronizedLoc, ToSynchExpr, ToSynchBody);
}
Stmt *ASTNodeImporter::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
SourceLocation ToAtThrowLoc = Importer.Import(S->getThrowLoc());
Expr *ToThrow = Importer.Import(S->getThrowExpr());
if (!ToThrow && S->getThrowExpr())
return nullptr;
return new (Importer.getToContext()) ObjCAtThrowStmt(ToAtThrowLoc, ToThrow);
}
Stmt *ASTNodeImporter::VisitObjCAutoreleasePoolStmt
(ObjCAutoreleasePoolStmt *S) {
SourceLocation ToAtLoc = Importer.Import(S->getAtLoc());
Stmt *ToSubStmt = Importer.Import(S->getSubStmt());
if (!ToSubStmt && S->getSubStmt())
return nullptr;
return new (Importer.getToContext()) ObjCAutoreleasePoolStmt(ToAtLoc,
ToSubStmt);
}
//----------------------------------------------------------------------------
// Import Expressions
//----------------------------------------------------------------------------
Expr *ASTNodeImporter::VisitExpr(Expr *E) {
Importer.FromDiag(E->getLocStart(), diag::err_unsupported_ast_node)
<< E->getStmtClassName();
return nullptr;
}
Expr *ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) {
ValueDecl *ToD = cast_or_null<ValueDecl>(Importer.Import(E->getDecl()));
if (!ToD)
return nullptr;
NamedDecl *FoundD = nullptr;
if (E->getDecl() != E->getFoundDecl()) {
FoundD = cast_or_null<NamedDecl>(Importer.Import(E->getFoundDecl()));
if (!FoundD)
return nullptr;
}
QualType T = Importer.Import(E->getType());
if (T.isNull())
return nullptr;
DeclRefExpr *DRE = DeclRefExpr::Create(Importer.getToContext(),
Importer.Import(E->getQualifierLoc()),
Importer.Import(E->getTemplateKeywordLoc()),
ToD,
E->refersToEnclosingVariableOrCapture(),
Importer.Import(E->getLocation()),
T, E->getValueKind(),
FoundD,
/*FIXME:TemplateArgs=*/nullptr);
if (E->hadMultipleCandidates())
DRE->setHadMultipleCandidates(true);
return DRE;
}
Expr *ASTNodeImporter::VisitIntegerLiteral(IntegerLiteral *E) {
QualType T = Importer.Import(E->getType());
if (T.isNull())
return nullptr;
return IntegerLiteral::Create(Importer.getToContext(),
E->getValue(), T,
Importer.Import(E->getLocation()));
}
Expr *ASTNodeImporter::VisitCharacterLiteral(CharacterLiteral *E) {
QualType T = Importer.Import(E->getType());
if (T.isNull())
return nullptr;
return new (Importer.getToContext()) CharacterLiteral(E->getValue(),
E->getKind(), T,
Importer.Import(E->getLocation()));
}
Expr *ASTNodeImporter::VisitParenExpr(ParenExpr *E) {
Expr *SubExpr = Importer.Import(E->getSubExpr());
if (!SubExpr)
return nullptr;
return new (Importer.getToContext())
ParenExpr(Importer.Import(E->getLParen()),
Importer.Import(E->getRParen()),
SubExpr);
}
Expr *ASTNodeImporter::VisitUnaryOperator(UnaryOperator *E) {
QualType T = Importer.Import(E->getType());
if (T.isNull())
return nullptr;
Expr *SubExpr = Importer.Import(E->getSubExpr());
if (!SubExpr)
return nullptr;
return new (Importer.getToContext()) UnaryOperator(SubExpr, E->getOpcode(),
T, E->getValueKind(),
E->getObjectKind(),
Importer.Import(E->getOperatorLoc()));
}
Expr *ASTNodeImporter::VisitUnaryExprOrTypeTraitExpr(
UnaryExprOrTypeTraitExpr *E) {
QualType ResultType = Importer.Import(E->getType());
if (E->isArgumentType()) {
TypeSourceInfo *TInfo = Importer.Import(E->getArgumentTypeInfo());
if (!TInfo)
return nullptr;
return new (Importer.getToContext()) UnaryExprOrTypeTraitExpr(E->getKind(),
TInfo, ResultType,
Importer.Import(E->getOperatorLoc()),
Importer.Import(E->getRParenLoc()));
}
Expr *SubExpr = Importer.Import(E->getArgumentExpr());
if (!SubExpr)
return nullptr;
return new (Importer.getToContext()) UnaryExprOrTypeTraitExpr(E->getKind(),
SubExpr, ResultType,
Importer.Import(E->getOperatorLoc()),
Importer.Import(E->getRParenLoc()));
}
Expr *ASTNodeImporter::VisitBinaryOperator(BinaryOperator *E) {
QualType T = Importer.Import(E->getType());
if (T.isNull())
return nullptr;
Expr *LHS = Importer.Import(E->getLHS());
if (!LHS)
return nullptr;
Expr *RHS = Importer.Import(E->getRHS());
if (!RHS)
return nullptr;
return new (Importer.getToContext()) BinaryOperator(LHS, RHS, E->getOpcode(),
T, E->getValueKind(),
E->getObjectKind(),
Importer.Import(E->getOperatorLoc()),
E->isFPContractable());
}
Expr *ASTNodeImporter::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
QualType T = Importer.Import(E->getType());
if (T.isNull())
return nullptr;
QualType CompLHSType = Importer.Import(E->getComputationLHSType());
if (CompLHSType.isNull())
return nullptr;
QualType CompResultType = Importer.Import(E->getComputationResultType());
if (CompResultType.isNull())
return nullptr;
Expr *LHS = Importer.Import(E->getLHS());
if (!LHS)
return nullptr;
Expr *RHS = Importer.Import(E->getRHS());
if (!RHS)
return nullptr;
return new (Importer.getToContext())
CompoundAssignOperator(LHS, RHS, E->getOpcode(),
T, E->getValueKind(),
E->getObjectKind(),
CompLHSType, CompResultType,
Importer.Import(E->getOperatorLoc()),
E->isFPContractable());
}
static bool ImportCastPath(CastExpr *E, CXXCastPath &Path) {
if (E->path_empty()) return false;
// TODO: import cast paths
return true;
}
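// Note: ImportCastPath() follows the "true means failure" convention used by
// the ImportDefinition helpers, so callers bail out with nullptr when it
// returns true (i.e. when the cast has a non-empty, not-yet-importable path).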
Expr *ASTNodeImporter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
QualType T = Importer.Import(E->getType());
if (T.isNull())
return nullptr;
Expr *SubExpr = Importer.Import(E->getSubExpr());
if (!SubExpr)
return nullptr;
CXXCastPath BasePath;
if (ImportCastPath(E, BasePath))
return nullptr;
return ImplicitCastExpr::Create(Importer.getToContext(), T, E->getCastKind(),
SubExpr, &BasePath, E->getValueKind());
}
Expr *ASTNodeImporter::VisitCStyleCastExpr(CStyleCastExpr *E) {
QualType T = Importer.Import(E->getType());
if (T.isNull())
return nullptr;
Expr *SubExpr = Importer.Import(E->getSubExpr());
if (!SubExpr)
return nullptr;
TypeSourceInfo *TInfo = Importer.Import(E->getTypeInfoAsWritten());
if (!TInfo && E->getTypeInfoAsWritten())
return nullptr;
CXXCastPath BasePath;
if (ImportCastPath(E, BasePath))
return nullptr;
return CStyleCastExpr::Create(Importer.getToContext(), T,
E->getValueKind(), E->getCastKind(),
SubExpr, &BasePath, TInfo,
Importer.Import(E->getLParenLoc()),
Importer.Import(E->getRParenLoc()));
}
Expr *ASTNodeImporter::VisitCXXConstructExpr(CXXConstructExpr *E) {
QualType T = Importer.Import(E->getType());
if (T.isNull())
return nullptr;
  CXXConstructorDecl *ToCCD = dyn_cast_or_null<CXXConstructorDecl>(
      Importer.Import(E->getConstructor()));
if (!ToCCD && E->getConstructor())
return nullptr;
size_t NumArgs = E->getNumArgs();
SmallVector<Expr *, 1> ToArgs(NumArgs);
ASTImporter &_Importer = Importer;
std::transform(E->arg_begin(), E->arg_end(), ToArgs.begin(),
[&_Importer](Expr *AE) -> Expr * {
return _Importer.Import(AE);
});
for (Expr *ToA : ToArgs) {
if (!ToA)
return nullptr;
}
return CXXConstructExpr::Create(Importer.getToContext(), T,
Importer.Import(E->getLocation()),
ToCCD, E->isElidable(),
ToArgs, E->hadMultipleCandidates(),
E->isListInitialization(),
E->isStdInitListInitialization(),
E->requiresZeroInitialization(),
E->getConstructionKind(),
Importer.Import(E->getParenOrBraceRange()));
}
Expr *ASTNodeImporter::VisitMemberExpr(MemberExpr *E) {
QualType T = Importer.Import(E->getType());
if (T.isNull())
return nullptr;
Expr *ToBase = Importer.Import(E->getBase());
if (!ToBase && E->getBase())
return nullptr;
  ValueDecl *ToMember =
      dyn_cast_or_null<ValueDecl>(Importer.Import(E->getMemberDecl()));
if (!ToMember && E->getMemberDecl())
return nullptr;
DeclAccessPair ToFoundDecl = DeclAccessPair::make(
dyn_cast<NamedDecl>(Importer.Import(E->getFoundDecl().getDecl())),
E->getFoundDecl().getAccess());
DeclarationNameInfo ToMemberNameInfo(
Importer.Import(E->getMemberNameInfo().getName()),
Importer.Import(E->getMemberNameInfo().getLoc()));
if (E->hasExplicitTemplateArgs()) {
return nullptr; // FIXME: handle template arguments
}
return MemberExpr::Create(Importer.getToContext(), ToBase,
E->isArrow(),
Importer.Import(E->getOperatorLoc()),
Importer.Import(E->getQualifierLoc()),
Importer.Import(E->getTemplateKeywordLoc()),
ToMember, ToFoundDecl, ToMemberNameInfo,
nullptr, T, E->getValueKind(),
E->getObjectKind());
}
Expr *ASTNodeImporter::VisitCallExpr(CallExpr *E) {
QualType T = Importer.Import(E->getType());
if (T.isNull())
return nullptr;
Expr *ToCallee = Importer.Import(E->getCallee());
if (!ToCallee && E->getCallee())
return nullptr;
unsigned NumArgs = E->getNumArgs();
llvm::SmallVector<Expr *, 2> ToArgs(NumArgs);
for (unsigned ai = 0, ae = NumArgs; ai != ae; ++ai) {
Expr *FromArg = E->getArg(ai);
Expr *ToArg = Importer.Import(FromArg);
if (!ToArg)
return nullptr;
ToArgs[ai] = ToArg;
}
Expr **ToArgs_Copied = new (Importer.getToContext())
Expr*[NumArgs];
for (unsigned ai = 0, ae = NumArgs; ai != ae; ++ai)
ToArgs_Copied[ai] = ToArgs[ai];
return new (Importer.getToContext())
CallExpr(Importer.getToContext(), ToCallee,
ArrayRef<Expr*>(ToArgs_Copied, NumArgs), T, E->getValueKind(),
Importer.Import(E->getRParenLoc()));
}
ASTImporter::ASTImporter(ASTContext &ToContext, FileManager &ToFileManager,
ASTContext &FromContext, FileManager &FromFileManager,
bool MinimalImport)
: ToContext(ToContext), FromContext(FromContext),
ToFileManager(ToFileManager), FromFileManager(FromFileManager),
Minimal(MinimalImport), LastDiagFromFrom(false)
{
ImportedDecls[FromContext.getTranslationUnitDecl()]
= ToContext.getTranslationUnitDecl();
}
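// Typical usage is a sketch along these lines (illustrative only: "FromUnit"
// and "ToUnit" are hypothetical translation units, not names from this file):
//
//   ASTImporter Importer(ToUnit->getASTContext(), ToUnit->getFileManager(),
//                        FromUnit->getASTContext(),
//                        FromUnit->getFileManager(),
//                        /*MinimalImport=*/false);
//   if (Decl *ToD = Importer.Import(FromD))
//     ; // ToD now lives in, and is owned by, the "to" ASTContext.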
ASTImporter::~ASTImporter() { }
QualType ASTImporter::Import(QualType FromT) {
if (FromT.isNull())
return QualType();
const Type *fromTy = FromT.getTypePtr();
// Check whether we've already imported this type.
llvm::DenseMap<const Type *, const Type *>::iterator Pos
= ImportedTypes.find(fromTy);
if (Pos != ImportedTypes.end())
return ToContext.getQualifiedType(Pos->second, FromT.getLocalQualifiers());
// Import the type
ASTNodeImporter Importer(*this);
QualType ToT = Importer.Visit(fromTy);
if (ToT.isNull())
return ToT;
// Record the imported type.
ImportedTypes[fromTy] = ToT.getTypePtr();
return ToContext.getQualifiedType(ToT, FromT.getLocalQualifiers());
}
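// Note: the type cache above is keyed on the unqualified Type node;
// getTypePtr() drops the local qualifiers, which are re-applied to the mapped
// type on the way out, so "const T" and "T" share a single cache entry.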
TypeSourceInfo *ASTImporter::Import(TypeSourceInfo *FromTSI) {
if (!FromTSI)
return FromTSI;
// FIXME: For now we just create a "trivial" type source info based
// on the type and a single location. Implement a real version of this.
QualType T = Import(FromTSI->getType());
if (T.isNull())
return nullptr;
return ToContext.getTrivialTypeSourceInfo(T,
Import(FromTSI->getTypeLoc().getLocStart()));
}
Decl *ASTImporter::GetAlreadyImportedOrNull(Decl *FromD) {
llvm::DenseMap<Decl *, Decl *>::iterator Pos = ImportedDecls.find(FromD);
if (Pos != ImportedDecls.end()) {
Decl *ToD = Pos->second;
ASTNodeImporter(*this).ImportDefinitionIfNeeded(FromD, ToD);
return ToD;
} else {
return nullptr;
}
}
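// Unlike Import(), GetAlreadyImportedOrNull() never creates a new
// declaration; it only consults the cache, pulling in a definition if one has
// become available on the "from" side since the original import.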
Decl *ASTImporter::Import(Decl *FromD) {
if (!FromD)
return nullptr;
ASTNodeImporter Importer(*this);
// Check whether we've already imported this declaration.
llvm::DenseMap<Decl *, Decl *>::iterator Pos = ImportedDecls.find(FromD);
if (Pos != ImportedDecls.end()) {
Decl *ToD = Pos->second;
Importer.ImportDefinitionIfNeeded(FromD, ToD);
return ToD;
}
  // Import the declaration.
Decl *ToD = Importer.Visit(FromD);
if (!ToD)
return nullptr;
// Record the imported declaration.
ImportedDecls[FromD] = ToD;
if (TagDecl *FromTag = dyn_cast<TagDecl>(FromD)) {
// Keep track of anonymous tags that have an associated typedef.
if (FromTag->getTypedefNameForAnonDecl())
AnonTagsWithPendingTypedefs.push_back(FromTag);
} else if (TypedefNameDecl *FromTypedef = dyn_cast<TypedefNameDecl>(FromD)) {
// When we've finished transforming a typedef, see whether it was the
// typedef for an anonymous tag.
for (SmallVectorImpl<TagDecl *>::iterator
FromTag = AnonTagsWithPendingTypedefs.begin(),
FromTagEnd = AnonTagsWithPendingTypedefs.end();
FromTag != FromTagEnd; ++FromTag) {
if ((*FromTag)->getTypedefNameForAnonDecl() == FromTypedef) {
if (TagDecl *ToTag = cast_or_null<TagDecl>(Import(*FromTag))) {
// We found the typedef for an anonymous tag; link them.
ToTag->setTypedefNameForAnonDecl(cast<TypedefNameDecl>(ToD));
AnonTagsWithPendingTypedefs.erase(FromTag);
break;
}
}
}
}
return ToD;
}
DeclContext *ASTImporter::ImportContext(DeclContext *FromDC) {
if (!FromDC)
return FromDC;
DeclContext *ToDC = cast_or_null<DeclContext>(Import(cast<Decl>(FromDC)));
if (!ToDC)
return nullptr;
// When we're using a record/enum/Objective-C class/protocol as a context, we
// need it to have a definition.
if (RecordDecl *ToRecord = dyn_cast<RecordDecl>(ToDC)) {
RecordDecl *FromRecord = cast<RecordDecl>(FromDC);
if (ToRecord->isCompleteDefinition()) {
// Do nothing.
} else if (FromRecord->isCompleteDefinition()) {
ASTNodeImporter(*this).ImportDefinition(FromRecord, ToRecord,
ASTNodeImporter::IDK_Basic);
} else {
CompleteDecl(ToRecord);
}
} else if (EnumDecl *ToEnum = dyn_cast<EnumDecl>(ToDC)) {
EnumDecl *FromEnum = cast<EnumDecl>(FromDC);
if (ToEnum->isCompleteDefinition()) {
// Do nothing.
} else if (FromEnum->isCompleteDefinition()) {
ASTNodeImporter(*this).ImportDefinition(FromEnum, ToEnum,
ASTNodeImporter::IDK_Basic);
} else {
CompleteDecl(ToEnum);
}
} else if (ObjCInterfaceDecl *ToClass = dyn_cast<ObjCInterfaceDecl>(ToDC)) {
ObjCInterfaceDecl *FromClass = cast<ObjCInterfaceDecl>(FromDC);
if (ToClass->getDefinition()) {
// Do nothing.
} else if (ObjCInterfaceDecl *FromDef = FromClass->getDefinition()) {
ASTNodeImporter(*this).ImportDefinition(FromDef, ToClass,
ASTNodeImporter::IDK_Basic);
} else {
CompleteDecl(ToClass);
}
} else if (ObjCProtocolDecl *ToProto = dyn_cast<ObjCProtocolDecl>(ToDC)) {
ObjCProtocolDecl *FromProto = cast<ObjCProtocolDecl>(FromDC);
if (ToProto->getDefinition()) {
// Do nothing.
} else if (ObjCProtocolDecl *FromDef = FromProto->getDefinition()) {
ASTNodeImporter(*this).ImportDefinition(FromDef, ToProto,
ASTNodeImporter::IDK_Basic);
} else {
CompleteDecl(ToProto);
}
}
return ToDC;
}
Expr *ASTImporter::Import(Expr *FromE) {
if (!FromE)
return nullptr;
return cast_or_null<Expr>(Import(cast<Stmt>(FromE)));
}
Stmt *ASTImporter::Import(Stmt *FromS) {
if (!FromS)
return nullptr;
  // Check whether we've already imported this statement.
llvm::DenseMap<Stmt *, Stmt *>::iterator Pos = ImportedStmts.find(FromS);
if (Pos != ImportedStmts.end())
return Pos->second;
  // Import the statement.
ASTNodeImporter Importer(*this);
Stmt *ToS = Importer.Visit(FromS);
if (!ToS)
return nullptr;
  // Record the imported statement.
ImportedStmts[FromS] = ToS;
return ToS;
}
NestedNameSpecifier *ASTImporter::Import(NestedNameSpecifier *FromNNS) {
if (!FromNNS)
return nullptr;
NestedNameSpecifier *prefix = Import(FromNNS->getPrefix());
switch (FromNNS->getKind()) {
case NestedNameSpecifier::Identifier:
if (IdentifierInfo *II = Import(FromNNS->getAsIdentifier())) {
return NestedNameSpecifier::Create(ToContext, prefix, II);
}
return nullptr;
case NestedNameSpecifier::Namespace:
if (NamespaceDecl *NS =
cast<NamespaceDecl>(Import(FromNNS->getAsNamespace()))) {
return NestedNameSpecifier::Create(ToContext, prefix, NS);
}
return nullptr;
case NestedNameSpecifier::NamespaceAlias:
if (NamespaceAliasDecl *NSAD =
cast<NamespaceAliasDecl>(Import(FromNNS->getAsNamespaceAlias()))) {
return NestedNameSpecifier::Create(ToContext, prefix, NSAD);
}
return nullptr;
case NestedNameSpecifier::Global:
return NestedNameSpecifier::GlobalSpecifier(ToContext);
case NestedNameSpecifier::Super:
if (CXXRecordDecl *RD =
cast<CXXRecordDecl>(Import(FromNNS->getAsRecordDecl()))) {
return NestedNameSpecifier::SuperSpecifier(ToContext, RD);
}
return nullptr;
case NestedNameSpecifier::TypeSpec:
case NestedNameSpecifier::TypeSpecWithTemplate: {
QualType T = Import(QualType(FromNNS->getAsType(), 0u));
if (!T.isNull()) {
bool bTemplate = FromNNS->getKind() ==
NestedNameSpecifier::TypeSpecWithTemplate;
return NestedNameSpecifier::Create(ToContext, prefix,
bTemplate, T.getTypePtr());
}
}
return nullptr;
}
llvm_unreachable("Invalid nested name specifier kind");
}
NestedNameSpecifierLoc ASTImporter::Import(NestedNameSpecifierLoc FromNNS) {
// FIXME: Implement!
return NestedNameSpecifierLoc();
}
TemplateName ASTImporter::Import(TemplateName From) {
switch (From.getKind()) {
case TemplateName::Template:
if (TemplateDecl *ToTemplate
= cast_or_null<TemplateDecl>(Import(From.getAsTemplateDecl())))
return TemplateName(ToTemplate);
return TemplateName();
case TemplateName::OverloadedTemplate: {
OverloadedTemplateStorage *FromStorage = From.getAsOverloadedTemplate();
UnresolvedSet<2> ToTemplates;
for (OverloadedTemplateStorage::iterator I = FromStorage->begin(),
E = FromStorage->end();
I != E; ++I) {
if (NamedDecl *To = cast_or_null<NamedDecl>(Import(*I)))
ToTemplates.addDecl(To);
else
return TemplateName();
}
return ToContext.getOverloadedTemplateName(ToTemplates.begin(),
ToTemplates.end());
}
case TemplateName::QualifiedTemplate: {
QualifiedTemplateName *QTN = From.getAsQualifiedTemplateName();
NestedNameSpecifier *Qualifier = Import(QTN->getQualifier());
if (!Qualifier)
return TemplateName();
if (TemplateDecl *ToTemplate
= cast_or_null<TemplateDecl>(Import(From.getAsTemplateDecl())))
return ToContext.getQualifiedTemplateName(Qualifier,
QTN->hasTemplateKeyword(),
ToTemplate);
return TemplateName();
}
case TemplateName::DependentTemplate: {
DependentTemplateName *DTN = From.getAsDependentTemplateName();
NestedNameSpecifier *Qualifier = Import(DTN->getQualifier());
if (!Qualifier)
return TemplateName();
if (DTN->isIdentifier()) {
return ToContext.getDependentTemplateName(Qualifier,
Import(DTN->getIdentifier()));
}
return ToContext.getDependentTemplateName(Qualifier, DTN->getOperator());
}
case TemplateName::SubstTemplateTemplateParm: {
SubstTemplateTemplateParmStorage *subst
= From.getAsSubstTemplateTemplateParm();
TemplateTemplateParmDecl *param
= cast_or_null<TemplateTemplateParmDecl>(Import(subst->getParameter()));
if (!param)
return TemplateName();
TemplateName replacement = Import(subst->getReplacement());
if (replacement.isNull()) return TemplateName();
return ToContext.getSubstTemplateTemplateParm(param, replacement);
}
case TemplateName::SubstTemplateTemplateParmPack: {
SubstTemplateTemplateParmPackStorage *SubstPack
= From.getAsSubstTemplateTemplateParmPack();
TemplateTemplateParmDecl *Param
= cast_or_null<TemplateTemplateParmDecl>(
Import(SubstPack->getParameterPack()));
if (!Param)
return TemplateName();
ASTNodeImporter Importer(*this);
TemplateArgument ArgPack
= Importer.ImportTemplateArgument(SubstPack->getArgumentPack());
if (ArgPack.isNull())
return TemplateName();
return ToContext.getSubstTemplateTemplateParmPack(Param, ArgPack);
}
}
llvm_unreachable("Invalid template name kind");
}
SourceLocation ASTImporter::Import(SourceLocation FromLoc) {
if (FromLoc.isInvalid())
return SourceLocation();
SourceManager &FromSM = FromContext.getSourceManager();
// For now, map everything down to its spelling location, so that we
// don't have to import macro expansions.
// FIXME: Import macro expansions!
FromLoc = FromSM.getSpellingLoc(FromLoc);
std::pair<FileID, unsigned> Decomposed = FromSM.getDecomposedLoc(FromLoc);
SourceManager &ToSM = ToContext.getSourceManager();
FileID ToFileID = Import(Decomposed.first);
if (ToFileID.isInvalid())
return SourceLocation();
SourceLocation ret = ToSM.getLocForStartOfFile(ToFileID)
.getLocWithOffset(Decomposed.second);
return ret;
}
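// Worked example (illustrative): if FromLoc spells out to (FileID F,
// offset 42) in the "from" source manager, the imported location is the start
// of the corresponding "to" FileID advanced by the same 42-character offset.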
SourceRange ASTImporter::Import(SourceRange FromRange) {
return SourceRange(Import(FromRange.getBegin()), Import(FromRange.getEnd()));
}
FileID ASTImporter::Import(FileID FromID) {
llvm::DenseMap<FileID, FileID>::iterator Pos
= ImportedFileIDs.find(FromID);
if (Pos != ImportedFileIDs.end())
return Pos->second;
SourceManager &FromSM = FromContext.getSourceManager();
SourceManager &ToSM = ToContext.getSourceManager();
const SrcMgr::SLocEntry &FromSLoc = FromSM.getSLocEntry(FromID);
assert(FromSLoc.isFile() && "Cannot handle macro expansions yet");
// Include location of this file.
SourceLocation ToIncludeLoc = Import(FromSLoc.getFile().getIncludeLoc());
  // Map the FileID over to the "to" source manager.
FileID ToID;
const SrcMgr::ContentCache *Cache = FromSLoc.getFile().getContentCache();
if (Cache->OrigEntry && Cache->OrigEntry->getDir()) {
// FIXME: We probably want to use getVirtualFile(), so we don't hit the
// disk again
// FIXME: We definitely want to re-use the existing MemoryBuffer, rather
// than mmap the files several times.
const FileEntry *Entry = ToFileManager.getFile(Cache->OrigEntry->getName());
if (!Entry)
return FileID();
ToID = ToSM.createFileID(Entry, ToIncludeLoc,
FromSLoc.getFile().getFileCharacteristic());
} else {
// FIXME: We want to re-use the existing MemoryBuffer!
const llvm::MemoryBuffer *
FromBuf = Cache->getBuffer(FromContext.getDiagnostics(), FromSM);
std::unique_ptr<llvm::MemoryBuffer> ToBuf
= llvm::MemoryBuffer::getMemBufferCopy(FromBuf->getBuffer(),
FromBuf->getBufferIdentifier());
ToID = ToSM.createFileID(std::move(ToBuf),
FromSLoc.getFile().getFileCharacteristic());
}
ImportedFileIDs[FromID] = ToID;
return ToID;
}
void ASTImporter::ImportDefinition(Decl *From) {
Decl *To = Import(From);
if (!To)
return;
  if (DeclContext *FromDC = dyn_cast<DeclContext>(From)) {
ASTNodeImporter Importer(*this);
if (RecordDecl *ToRecord = dyn_cast<RecordDecl>(To)) {
if (!ToRecord->getDefinition()) {
Importer.ImportDefinition(cast<RecordDecl>(FromDC), ToRecord,
ASTNodeImporter::IDK_Everything);
return;
}
}
if (EnumDecl *ToEnum = dyn_cast<EnumDecl>(To)) {
if (!ToEnum->getDefinition()) {
Importer.ImportDefinition(cast<EnumDecl>(FromDC), ToEnum,
ASTNodeImporter::IDK_Everything);
return;
}
}
if (ObjCInterfaceDecl *ToIFace = dyn_cast<ObjCInterfaceDecl>(To)) {
if (!ToIFace->getDefinition()) {
Importer.ImportDefinition(cast<ObjCInterfaceDecl>(FromDC), ToIFace,
ASTNodeImporter::IDK_Everything);
return;
}
}
if (ObjCProtocolDecl *ToProto = dyn_cast<ObjCProtocolDecl>(To)) {
if (!ToProto->getDefinition()) {
Importer.ImportDefinition(cast<ObjCProtocolDecl>(FromDC), ToProto,
ASTNodeImporter::IDK_Everything);
return;
}
}
Importer.ImportDeclContext(FromDC, true);
}
}
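// Note: this overload is the eager entry point: it requests IDK_Everything
// (rather than the lazy IDK_Basic used by ImportContext above) and then walks
// the whole DeclContext, so the "to" AST receives a complete definition.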
DeclarationName ASTImporter::Import(DeclarationName FromName) {
if (!FromName)
return DeclarationName();
switch (FromName.getNameKind()) {
case DeclarationName::Identifier:
return Import(FromName.getAsIdentifierInfo());
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCMultiArgSelector:
return Import(FromName.getObjCSelector());
case DeclarationName::CXXConstructorName: {
QualType T = Import(FromName.getCXXNameType());
if (T.isNull())
return DeclarationName();
return ToContext.DeclarationNames.getCXXConstructorName(
ToContext.getCanonicalType(T));
}
case DeclarationName::CXXDestructorName: {
QualType T = Import(FromName.getCXXNameType());
if (T.isNull())
return DeclarationName();
return ToContext.DeclarationNames.getCXXDestructorName(
ToContext.getCanonicalType(T));
}
case DeclarationName::CXXConversionFunctionName: {
QualType T = Import(FromName.getCXXNameType());
if (T.isNull())
return DeclarationName();
return ToContext.DeclarationNames.getCXXConversionFunctionName(
ToContext.getCanonicalType(T));
}
case DeclarationName::CXXOperatorName:
return ToContext.DeclarationNames.getCXXOperatorName(
FromName.getCXXOverloadedOperator());
case DeclarationName::CXXLiteralOperatorName:
return ToContext.DeclarationNames.getCXXLiteralOperatorName(
Import(FromName.getCXXLiteralIdentifier()));
case DeclarationName::CXXUsingDirective:
// FIXME: STATICS!
return DeclarationName::getUsingDirectiveName();
}
llvm_unreachable("Invalid DeclarationName Kind!");
}
IdentifierInfo *ASTImporter::Import(const IdentifierInfo *FromId) {
if (!FromId)
return nullptr;
return &ToContext.Idents.get(FromId->getName());
}
Selector ASTImporter::Import(Selector FromSel) {
if (FromSel.isNull())
return Selector();
SmallVector<IdentifierInfo *, 4> Idents;
Idents.push_back(Import(FromSel.getIdentifierInfoForSlot(0)));
for (unsigned I = 1, N = FromSel.getNumArgs(); I < N; ++I)
Idents.push_back(Import(FromSel.getIdentifierInfoForSlot(I)));
return ToContext.Selectors.getSelector(FromSel.getNumArgs(), Idents.data());
}
DeclarationName ASTImporter::HandleNameConflict(DeclarationName Name,
DeclContext *DC,
unsigned IDNS,
NamedDecl **Decls,
unsigned NumDecls) {
return Name;
}
DiagnosticBuilder ASTImporter::ToDiag(SourceLocation Loc, unsigned DiagID) {
if (LastDiagFromFrom)
ToContext.getDiagnostics().notePriorDiagnosticFrom(
FromContext.getDiagnostics());
LastDiagFromFrom = false;
return ToContext.getDiagnostics().Report(Loc, DiagID);
}
DiagnosticBuilder ASTImporter::FromDiag(SourceLocation Loc, unsigned DiagID) {
if (!LastDiagFromFrom)
FromContext.getDiagnostics().notePriorDiagnosticFrom(
ToContext.getDiagnostics());
LastDiagFromFrom = true;
return FromContext.getDiagnostics().Report(Loc, DiagID);
}
void ASTImporter::CompleteDecl(Decl *D) {
if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
if (!ID->getDefinition())
ID->startDefinition();
}
else if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D)) {
if (!PD->getDefinition())
PD->startDefinition();
}
else if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
if (!TD->getDefinition() && !TD->isBeingDefined()) {
TD->startDefinition();
TD->setCompleteDefinition(true);
}
}
else {
    assert(0 && "CompleteDecl called on a Decl that can't be completed");
}
}
Decl *ASTImporter::Imported(Decl *From, Decl *To) {
ImportedDecls[From] = To;
return To;
}
bool ASTImporter::IsStructurallyEquivalent(QualType From, QualType To,
bool Complain) {
llvm::DenseMap<const Type *, const Type *>::iterator Pos
= ImportedTypes.find(From.getTypePtr());
if (Pos != ImportedTypes.end() && ToContext.hasSameType(Import(From), To))
return true;
StructuralEquivalenceContext Ctx(FromContext, ToContext, NonEquivalentDecls,
false, Complain);
return Ctx.IsStructurallyEquivalent(From, To);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/ExprConstant.cpp | //===--- ExprConstant.cpp - Expression Constant Evaluator -----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Expr constant evaluator.
//
// Constant expression evaluation produces four main results:
//
// * A success/failure flag indicating whether constant folding was successful.
// This is the 'bool' return value used by most of the code in this file. A
// 'false' return value indicates that constant folding has failed, and any
// appropriate diagnostic has already been produced.
//
// * An evaluated result, valid only if constant folding has not failed.
//
// * A flag indicating if evaluation encountered (unevaluated) side-effects.
// These arise in cases such as (sideEffect(), 0) and (sideEffect() || 1),
// where it is possible to determine the evaluated result regardless.
//
// * A set of notes indicating why the evaluation was not a constant expression
// (under the C++11 / C++1y rules only, at the moment), or, if folding failed
// too, why the expression could not be folded.
//
// If we are checking for a potential constant expression, failure to constant
// fold a potential constant sub-expression will be indicated by a 'false'
// return value (the expression could not be folded) and no diagnostic (the
// expression is not necessarily non-constant).
//
//===----------------------------------------------------------------------===//
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
#include <cstring>
#include <functional>
#include "llvm/Support/OacrIgnoreCond.h" // HLSL Change - constant evaluator is language-dependant
#include "dxc/HlslIntrinsicOp.h" // HLSL Change
using namespace clang;
using llvm::APSInt;
using llvm::APFloat;
static bool IsGlobalLValue(APValue::LValueBase B);
// HLSL Change Starts - reuse code from CallExpr::getBuiltinCallee
static const FunctionDecl *GetCallExprFunction(const CallExpr *CE) {
const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE->getCallee());
  if (!ICE) // FIXME: deal with more complex calls (e.g. (func)(), (*func)()).
    return nullptr;
  const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr());
  if (!DRE)
    return nullptr;
  const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(DRE->getDecl());
  if (!FDecl)
    return nullptr;
  if (!FDecl->getIdentifier())
    return nullptr;
  return FDecl;
}
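// Informal note: a direct call 'f(x)' is modeled as an ImplicitCastExpr
// (function-to-pointer decay) wrapping a DeclRefExpr, which this helper
// unwraps; other callee shapes, e.g. '(*f)(x)', fail one of the dyn_casts
// and return null, per the FIXME above.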
// Returns true if the given InitListExpr is for constructing a HLSL vector
// with the matching number of initializers and each initializer has the
// matching element type.
static bool IsHLSLVecInitList(const Expr* expr) {
if (const auto* initExpr = dyn_cast<InitListExpr>(expr)) {
const QualType vecType = initExpr->getType();
if (!hlsl::IsHLSLVecType(vecType))
return false;
const uint32_t size = hlsl::GetHLSLVecSize(vecType);
    const QualType elemType =
        hlsl::GetHLSLVecElementType(vecType).getCanonicalType();
if (initExpr->getNumInits() != size)
return false;
for (uint32_t i = 0; i < size; ++i)
if (initExpr->getInit(i)->getType().getCanonicalType() != elemType)
return false;
return true;
}
return false;
}
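// Illustrative example (HLSL): for 'float3 v = { 1.0f, 2.0f, 3.0f };' the
// list has exactly three initializers whose canonical types all match the
// vector's element type, so this returns true; a mismatched initializer
// count or element type makes it return false.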
// HLSL Change Ends
namespace {
struct LValue;
struct CallStackFrame;
struct EvalInfo;
static QualType getType(APValue::LValueBase B) {
if (!B) return QualType();
if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>())
return D->getType();
const Expr *Base = B.get<const Expr*>();
// For a materialized temporary, the type of the temporary we materialized
// may not be the type of the expression.
if (const MaterializeTemporaryExpr *MTE =
dyn_cast<MaterializeTemporaryExpr>(Base)) {
SmallVector<const Expr *, 2> CommaLHSs;
SmallVector<SubobjectAdjustment, 2> Adjustments;
const Expr *Temp = MTE->GetTemporaryExpr();
const Expr *Inner = Temp->skipRValueSubobjectAdjustments(CommaLHSs,
Adjustments);
// Keep any cv-qualifiers from the reference if we generated a temporary
// for it.
if (Inner != Temp)
return Inner->getType();
}
return Base->getType();
}
/// Get an LValue path entry, which is known to not be an array index, as a
/// field or base class.
static
APValue::BaseOrMemberType getAsBaseOrMember(APValue::LValuePathEntry E) {
APValue::BaseOrMemberType Value;
Value.setFromOpaqueValue(E.BaseOrMember);
return Value;
}
/// Get an LValue path entry, which is known to not be an array index, as a
/// field declaration.
static const FieldDecl *getAsField(APValue::LValuePathEntry E) {
return dyn_cast<FieldDecl>(getAsBaseOrMember(E).getPointer());
}
/// Get an LValue path entry, which is known to not be an array index, as a
/// base class declaration.
static const CXXRecordDecl *getAsBaseClass(APValue::LValuePathEntry E) {
return dyn_cast<CXXRecordDecl>(getAsBaseOrMember(E).getPointer());
}
/// Determine whether this LValue path entry for a base class names a virtual
/// base class.
static bool isVirtualBaseClass(APValue::LValuePathEntry E) {
return getAsBaseOrMember(E).getInt();
}
/// Find the path length and type of the most-derived subobject in the given
/// path, and find the size of the containing array, if any.
static
unsigned findMostDerivedSubobject(ASTContext &Ctx, QualType Base,
ArrayRef<APValue::LValuePathEntry> Path,
uint64_t &ArraySize, QualType &Type) {
unsigned MostDerivedLength = 0;
Type = Base;
for (unsigned I = 0, N = Path.size(); I != N; ++I) {
if (Type->isArrayType()) {
const ConstantArrayType *CAT =
cast<ConstantArrayType>(Ctx.getAsArrayType(Type));
Type = CAT->getElementType();
ArraySize = CAT->getSize().getZExtValue();
MostDerivedLength = I + 1;
} else if (Type->isAnyComplexType()) {
const ComplexType *CT = Type->castAs<ComplexType>();
Type = CT->getElementType();
ArraySize = 2;
MostDerivedLength = I + 1;
} else if (const FieldDecl *FD = getAsField(Path[I])) {
Type = FD->getType();
ArraySize = 0;
MostDerivedLength = I + 1;
} else {
// Path[I] describes a base class.
ArraySize = 0;
}
}
return MostDerivedLength;
}
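// Worked example (a sketch): for Base = 'int[2][3]' and a path of two
// array-index entries, the walk ends with Type = 'int', ArraySize = 3
// (the innermost containing array), and a most-derived path length of 2.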
// The order of this enum is important for diagnostics.
enum CheckSubobjectKind {
CSK_Base, CSK_Derived, CSK_Field, CSK_ArrayToPointer, CSK_ArrayIndex,
CSK_This, CSK_Real, CSK_Imag
};
/// A path from a glvalue to a subobject of that glvalue.
struct SubobjectDesignator {
/// True if the subobject was named in a manner not supported by C++11. Such
/// lvalues can still be folded, but they are not core constant expressions
/// and we cannot perform lvalue-to-rvalue conversions on them.
bool Invalid : 1;
/// Is this a pointer one past the end of an object?
bool IsOnePastTheEnd : 1;
/// The length of the path to the most-derived object of which this is a
/// subobject.
unsigned MostDerivedPathLength : 30;
/// The size of the array of which the most-derived object is an element, or
/// 0 if the most-derived object is not an array element.
uint64_t MostDerivedArraySize;
/// The type of the most derived object referred to by this address.
QualType MostDerivedType;
typedef APValue::LValuePathEntry PathEntry;
/// The entries on the path from the glvalue to the designated subobject.
SmallVector<PathEntry, 8> Entries;
SubobjectDesignator() : Invalid(true) {}
explicit SubobjectDesignator(QualType T)
: Invalid(false), IsOnePastTheEnd(false), MostDerivedPathLength(0),
MostDerivedArraySize(0), MostDerivedType(T) {}
SubobjectDesignator(ASTContext &Ctx, const APValue &V)
: Invalid(!V.isLValue() || !V.hasLValuePath()), IsOnePastTheEnd(false),
MostDerivedPathLength(0), MostDerivedArraySize(0) {
if (!Invalid) {
IsOnePastTheEnd = V.isLValueOnePastTheEnd();
ArrayRef<PathEntry> VEntries = V.getLValuePath();
Entries.insert(Entries.end(), VEntries.begin(), VEntries.end());
if (V.getLValueBase())
MostDerivedPathLength =
findMostDerivedSubobject(Ctx, getType(V.getLValueBase()),
V.getLValuePath(), MostDerivedArraySize,
MostDerivedType);
}
}
void setInvalid() {
Invalid = true;
Entries.clear();
}
/// Determine whether this is a one-past-the-end pointer.
bool isOnePastTheEnd() const {
assert(!Invalid);
if (IsOnePastTheEnd)
return true;
if (MostDerivedArraySize &&
Entries[MostDerivedPathLength - 1].ArrayIndex == MostDerivedArraySize)
return true;
return false;
}
/// Check that this refers to a valid subobject.
bool isValidSubobject() const {
if (Invalid)
return false;
return !isOnePastTheEnd();
}
/// Check that this refers to a valid subobject, and if not, produce a
/// relevant diagnostic and set the designator as invalid.
bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK);
/// Update this designator to refer to the first element within this array.
void addArrayUnchecked(const ConstantArrayType *CAT) {
PathEntry Entry;
Entry.ArrayIndex = 0;
Entries.push_back(Entry);
// This is a most-derived object.
MostDerivedType = CAT->getElementType();
MostDerivedArraySize = CAT->getSize().getZExtValue();
MostDerivedPathLength = Entries.size();
}
/// Update this designator to refer to the given base or member of this
/// object.
void addDeclUnchecked(const Decl *D, bool Virtual = false) {
PathEntry Entry;
APValue::BaseOrMemberType Value(D, Virtual);
Entry.BaseOrMember = Value.getOpaqueValue();
Entries.push_back(Entry);
// If this isn't a base class, it's a new most-derived object.
if (const FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
MostDerivedType = FD->getType();
MostDerivedArraySize = 0;
MostDerivedPathLength = Entries.size();
}
}
/// Update this designator to refer to the given complex component.
void addComplexUnchecked(QualType EltTy, bool Imag) {
PathEntry Entry;
Entry.ArrayIndex = Imag;
Entries.push_back(Entry);
// This is technically a most-derived object, though in practice this
// is unlikely to matter.
MostDerivedType = EltTy;
MostDerivedArraySize = 2;
MostDerivedPathLength = Entries.size();
}
void diagnosePointerArithmetic(EvalInfo &Info, const Expr *E, uint64_t N);
/// Add N to the address of this subobject.
void adjustIndex(EvalInfo &Info, const Expr *E, uint64_t N) {
if (Invalid) return;
if (MostDerivedPathLength == Entries.size() && MostDerivedArraySize) {
Entries.back().ArrayIndex += N;
if (Entries.back().ArrayIndex > MostDerivedArraySize) {
diagnosePointerArithmetic(Info, E, Entries.back().ArrayIndex);
setInvalid();
}
return;
}
// [expr.add]p4: For the purposes of these operators, a pointer to a
// nonarray object behaves the same as a pointer to the first element of
// an array of length one with the type of the object as its element type.
if (IsOnePastTheEnd && N == (uint64_t)-1)
IsOnePastTheEnd = false;
else if (!IsOnePastTheEnd && N == 1)
IsOnePastTheEnd = true;
else if (N != 0) {
diagnosePointerArithmetic(Info, E, uint64_t(IsOnePastTheEnd) + N);
setInvalid();
}
}
};
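// Sketch of the bookkeeping above: for 'int a[4]', a designator at index 4
// is the one-past-the-end position and is still representable, while
// adjustIndex() stepping past index 4 diagnoses the pointer arithmetic and
// invalidates the designator rather than wrapping.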
/// A stack frame in the constexpr call stack.
struct CallStackFrame {
EvalInfo &Info;
/// Parent - The caller of this stack frame.
CallStackFrame *Caller;
/// CallLoc - The location of the call expression for this call.
SourceLocation CallLoc;
/// Callee - The function which was called.
const FunctionDecl *Callee;
/// Index - The call index of this call.
unsigned Index;
/// This - The binding for the this pointer in this call, if any.
const LValue *This;
/// Arguments - Parameter bindings for this function call, indexed by
/// parameters' function scope indices.
APValue *Arguments;
// Note that we intentionally use std::map here so that references to
// values are stable.
typedef std::map<const void*, APValue> MapTy;
typedef MapTy::const_iterator temp_iterator;
/// Temporaries - Temporary lvalues materialized within this stack frame.
MapTy Temporaries;
CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
const FunctionDecl *Callee, const LValue *This,
APValue *Arguments);
~CallStackFrame();
APValue *getTemporary(const void *Key) {
MapTy::iterator I = Temporaries.find(Key);
return I == Temporaries.end() ? nullptr : &I->second;
}
APValue &createTemporary(const void *Key, bool IsLifetimeExtended);
};
/// Temporarily override 'this'.
class ThisOverrideRAII {
public:
ThisOverrideRAII(CallStackFrame &Frame, const LValue *NewThis, bool Enable)
: Frame(Frame), OldThis(Frame.This) {
if (Enable)
Frame.This = NewThis;
}
~ThisOverrideRAII() {
Frame.This = OldThis;
}
private:
CallStackFrame &Frame;
const LValue *OldThis;
};
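// Typical use (illustrative): evaluation of a member call rebinds 'this'
// for the callee's frame with something like
//   ThisOverrideRAII ThisOverride(*Info.CurrentCall, &ThisLV, /*Enable=*/true);
// and the previous binding is restored automatically on scope exit.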
/// A partial diagnostic that we might know in advance we are not going to
/// emit.
class OptionalDiagnostic {
PartialDiagnostic *Diag;
public:
explicit OptionalDiagnostic(PartialDiagnostic *Diag = nullptr)
: Diag(Diag) {}
template<typename T>
OptionalDiagnostic &operator<<(const T &v) {
if (Diag)
*Diag << v;
return *this;
}
OptionalDiagnostic &operator<<(const APSInt &I) {
if (Diag) {
SmallVector<char, 32> Buffer;
I.toString(Buffer);
*Diag << StringRef(Buffer.data(), Buffer.size());
}
return *this;
}
OptionalDiagnostic &operator<<(const APFloat &F) {
if (Diag) {
// FIXME: Force the precision of the source value down so we don't
// print digits which are usually useless (we don't really care here if
// we truncate a digit by accident in edge cases). Ideally,
// APFloat::toString would automatically print the shortest
// representation which rounds to the correct value, but it's a bit
// tricky to implement.
unsigned precision =
llvm::APFloat::semanticsPrecision(F.getSemantics());
precision = (precision * 59 + 195) / 196;
SmallVector<char, 32> Buffer;
F.toString(Buffer, precision);
*Diag << StringRef(Buffer.data(), Buffer.size());
}
return *this;
}
};
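// Usage sketch: callers stream operands unconditionally, e.g.
//   Info.CCEDiag(E, diag::note_constexpr_overflow) << SrcValue << DestType;
// and when no diagnostic is being collected the operands are simply
// discarded by the no-op operator<< overloads above.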
/// A cleanup, and a flag indicating whether it is lifetime-extended.
class Cleanup {
llvm::PointerIntPair<APValue*, 1, bool> Value;
public:
Cleanup(APValue *Val, bool IsLifetimeExtended)
: Value(Val, IsLifetimeExtended) {}
bool isLifetimeExtended() const { return Value.getInt(); }
void endLifetime() {
*Value.getPointer() = APValue();
}
};
/// EvalInfo - This is a private struct used by the evaluator to capture
/// information about a subexpression as it is folded. It retains information
/// about the AST context, but also maintains information about the folded
/// expression.
///
/// If an expression could be evaluated, it is still possible it is not a C
/// "integer constant expression" or constant expression. If not, this struct
/// captures information about how and why not.
///
/// One bit of information passed *into* the request for constant folding
/// indicates whether the subexpression is "evaluated" or not according to C
/// rules. For example, the RHS of (0 && foo()) is not evaluated. We can
/// evaluate the expression regardless of what the RHS is, but C only allows
/// certain things in certain situations.
struct EvalInfo {
ASTContext &Ctx;
/// EvalStatus - Contains information about the evaluation.
Expr::EvalStatus &EvalStatus;
/// CurrentCall - The top of the constexpr call stack.
CallStackFrame *CurrentCall;
/// CallStackDepth - The number of calls in the call stack right now.
unsigned CallStackDepth;
/// NextCallIndex - The next call index to assign.
unsigned NextCallIndex;
/// StepsLeft - The remaining number of evaluation steps we're permitted
/// to perform. This is essentially a limit for the number of statements
/// we will evaluate.
unsigned StepsLeft;
/// BottomFrame - The frame in which evaluation started. This must be
/// initialized after CurrentCall and CallStackDepth.
CallStackFrame BottomFrame;
/// A stack of values whose lifetimes end at the end of some surrounding
/// evaluation frame.
llvm::SmallVector<Cleanup, 16> CleanupStack;
/// EvaluatingDecl - This is the declaration whose initializer is being
/// evaluated, if any.
APValue::LValueBase EvaluatingDecl;
/// EvaluatingDeclValue - This is the value being constructed for the
/// declaration whose initializer is being evaluated, if any.
APValue *EvaluatingDeclValue;
/// HasActiveDiagnostic - Was the previous diagnostic stored? If so, further
/// notes attached to it will also be stored, otherwise they will not be.
bool HasActiveDiagnostic;
enum EvaluationMode {
/// Evaluate as a constant expression. Stop if we find that the expression
/// is not a constant expression.
EM_ConstantExpression,
/// Evaluate as a potential constant expression. Keep going if we hit a
/// construct that we can't evaluate yet (because we don't yet know the
/// value of something) but stop if we hit something that could never be
/// a constant expression.
EM_PotentialConstantExpression,
/// Fold the expression to a constant. Stop if we hit a side-effect that
/// we can't model.
EM_ConstantFold,
/// Evaluate the expression looking for integer overflow and similar
/// issues. Don't worry about side-effects, and try to visit all
/// subexpressions.
EM_EvaluateForOverflow,
/// Evaluate in any way we know how. Don't worry about side-effects that
/// can't be modeled.
EM_IgnoreSideEffects,
/// Evaluate as a constant expression. Stop if we find that the expression
/// is not a constant expression. Some expressions can be retried in the
/// optimizer if we don't constant fold them here, but in an unevaluated
/// context we try to fold them immediately since the optimizer never
/// gets a chance to look at it.
EM_ConstantExpressionUnevaluated,
/// Evaluate as a potential constant expression. Keep going if we hit a
/// construct that we can't evaluate yet (because we don't yet know the
/// value of something) but stop if we hit something that could never be
/// a constant expression. Some expressions can be retried in the
/// optimizer if we don't constant fold them here, but in an unevaluated
/// context we try to fold them immediately since the optimizer never
/// gets a chance to look at it.
EM_PotentialConstantExpressionUnevaluated
} EvalMode;
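  // Informal examples: folding the initializer of a non-constexpr global
  // uses EM_ConstantFold, while pre-checking a constexpr function body
  // before any call site exists uses EM_PotentialConstantExpression.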
/// Are we checking whether the expression is a potential constant
/// expression?
bool checkingPotentialConstantExpression() const {
return EvalMode == EM_PotentialConstantExpression ||
EvalMode == EM_PotentialConstantExpressionUnevaluated;
}
/// Are we checking an expression for overflow?
// FIXME: We should check for any kind of undefined or suspicious behavior
// in such constructs, not just overflow.
bool checkingForOverflow() { return EvalMode == EM_EvaluateForOverflow; }
EvalInfo(const ASTContext &C, Expr::EvalStatus &S, EvaluationMode Mode)
: Ctx(const_cast<ASTContext &>(C)), EvalStatus(S), CurrentCall(nullptr),
CallStackDepth(0), NextCallIndex(1),
StepsLeft(getLangOpts().ConstexprStepLimit),
BottomFrame(*this, SourceLocation(), nullptr, nullptr, nullptr),
EvaluatingDecl((const ValueDecl *)nullptr),
EvaluatingDeclValue(nullptr), HasActiveDiagnostic(false),
EvalMode(Mode) {}
void setEvaluatingDecl(APValue::LValueBase Base, APValue &Value) {
EvaluatingDecl = Base;
EvaluatingDeclValue = &Value;
}
const LangOptions &getLangOpts() const { return Ctx.getLangOpts(); }
bool CheckCallLimit(SourceLocation Loc) {
// Don't perform any constexpr calls (other than the call we're checking)
// when checking a potential constant expression.
if (checkingPotentialConstantExpression() && CallStackDepth > 1)
return false;
if (NextCallIndex == 0) {
// NextCallIndex has wrapped around.
Diag(Loc, diag::note_constexpr_call_limit_exceeded);
return false;
}
if (CallStackDepth <= getLangOpts().ConstexprCallDepth)
return true;
Diag(Loc, diag::note_constexpr_depth_limit_exceeded)
<< getLangOpts().ConstexprCallDepth;
return false;
}
CallStackFrame *getCallFrame(unsigned CallIndex) {
assert(CallIndex && "no call index in getCallFrame");
// We will eventually hit BottomFrame, which has Index 1, so Frame can't
// be null in this loop.
CallStackFrame *Frame = CurrentCall;
while (Frame->Index > CallIndex)
Frame = Frame->Caller;
return (Frame->Index == CallIndex) ? Frame : nullptr;
}
bool nextStep(const Stmt *S) {
if (!StepsLeft) {
Diag(S->getLocStart(), diag::note_constexpr_step_limit_exceeded);
return false;
}
--StepsLeft;
return true;
}
private:
/// Add a diagnostic to the diagnostics list.
PartialDiagnostic &addDiag(SourceLocation Loc, diag::kind DiagId) {
PartialDiagnostic PD(DiagId, Ctx.getDiagAllocator());
EvalStatus.Diag->push_back(std::make_pair(Loc, PD));
return EvalStatus.Diag->back().second;
}
/// Add notes containing a call stack to the current point of evaluation.
void addCallStack(unsigned Limit);
public:
/// Diagnose that the evaluation cannot be folded.
OptionalDiagnostic Diag(SourceLocation Loc, diag::kind DiagId
= diag::note_invalid_subexpr_in_const_expr,
unsigned ExtraNotes = 0) {
if (EvalStatus.Diag) {
// If we have a prior diagnostic, it will be noting that the expression
// isn't a constant expression. This diagnostic is more important,
// unless we require this evaluation to produce a constant expression.
//
// FIXME: We might want to show both diagnostics to the user in
// EM_ConstantFold mode.
if (!EvalStatus.Diag->empty()) {
switch (EvalMode) {
case EM_ConstantFold:
case EM_IgnoreSideEffects:
case EM_EvaluateForOverflow:
if (!EvalStatus.HasSideEffects)
break;
// We've had side-effects; we want the diagnostic from them, not
// some later problem.
LLVM_FALLTHROUGH; // HLSL Change
case EM_ConstantExpression:
case EM_PotentialConstantExpression:
case EM_ConstantExpressionUnevaluated:
case EM_PotentialConstantExpressionUnevaluated:
HasActiveDiagnostic = false;
return OptionalDiagnostic();
}
}
unsigned CallStackNotes = CallStackDepth - 1;
unsigned Limit = Ctx.getDiagnostics().getConstexprBacktraceLimit();
if (Limit)
CallStackNotes = std::min(CallStackNotes, Limit + 1);
if (checkingPotentialConstantExpression())
CallStackNotes = 0;
HasActiveDiagnostic = true;
EvalStatus.Diag->clear();
EvalStatus.Diag->reserve(1 + ExtraNotes + CallStackNotes);
addDiag(Loc, DiagId);
if (!checkingPotentialConstantExpression())
addCallStack(Limit);
return OptionalDiagnostic(&(*EvalStatus.Diag)[0].second);
}
HasActiveDiagnostic = false;
return OptionalDiagnostic();
}
OptionalDiagnostic Diag(const Expr *E, diag::kind DiagId
= diag::note_invalid_subexpr_in_const_expr,
unsigned ExtraNotes = 0) {
if (EvalStatus.Diag)
return Diag(E->getExprLoc(), DiagId, ExtraNotes);
HasActiveDiagnostic = false;
return OptionalDiagnostic();
}
/// Diagnose that the evaluation does not produce a C++11 core constant
/// expression.
///
/// FIXME: Stop evaluating if we're in EM_ConstantExpression or
/// EM_PotentialConstantExpression mode and we produce one of these.
template<typename LocArg>
OptionalDiagnostic CCEDiag(LocArg Loc, diag::kind DiagId
= diag::note_invalid_subexpr_in_const_expr,
unsigned ExtraNotes = 0) {
// Don't override a previous diagnostic. Don't bother collecting
// diagnostics if we're evaluating for overflow.
if (!EvalStatus.Diag || !EvalStatus.Diag->empty()) {
HasActiveDiagnostic = false;
return OptionalDiagnostic();
}
return Diag(Loc, DiagId, ExtraNotes);
}
/// Add a note to a prior diagnostic.
OptionalDiagnostic Note(SourceLocation Loc, diag::kind DiagId) {
if (!HasActiveDiagnostic)
return OptionalDiagnostic();
return OptionalDiagnostic(&addDiag(Loc, DiagId));
}
/// Add a stack of notes to a prior diagnostic.
void addNotes(ArrayRef<PartialDiagnosticAt> Diags) {
if (HasActiveDiagnostic) {
EvalStatus.Diag->insert(EvalStatus.Diag->end(),
Diags.begin(), Diags.end());
}
}
/// Should we continue evaluation after encountering a side-effect that we
/// couldn't model?
bool keepEvaluatingAfterSideEffect() {
switch (EvalMode) {
case EM_PotentialConstantExpression:
case EM_PotentialConstantExpressionUnevaluated:
case EM_EvaluateForOverflow:
case EM_IgnoreSideEffects:
return true;
case EM_ConstantExpression:
case EM_ConstantExpressionUnevaluated:
case EM_ConstantFold:
return false;
}
llvm_unreachable("Missed EvalMode case");
}
/// Note that we have had a side-effect, and determine whether we should
/// keep evaluating.
bool noteSideEffect() {
EvalStatus.HasSideEffects = true;
return keepEvaluatingAfterSideEffect();
}
/// Should we continue evaluation as much as possible after encountering a
/// construct which can't be reduced to a value?
bool keepEvaluatingAfterFailure() {
if (!StepsLeft)
return false;
switch (EvalMode) {
case EM_PotentialConstantExpression:
case EM_PotentialConstantExpressionUnevaluated:
case EM_EvaluateForOverflow:
return true;
case EM_ConstantExpression:
case EM_ConstantExpressionUnevaluated:
case EM_ConstantFold:
case EM_IgnoreSideEffects:
return false;
}
llvm_unreachable("Missed EvalMode case");
}
};
/// Object used to treat all foldable expressions as constant expressions.
struct FoldConstant {
EvalInfo &Info;
bool Enabled;
bool HadNoPriorDiags;
EvalInfo::EvaluationMode OldMode;
explicit FoldConstant(EvalInfo &Info, bool Enabled)
: Info(Info),
Enabled(Enabled),
HadNoPriorDiags(Info.EvalStatus.Diag &&
Info.EvalStatus.Diag->empty() &&
!Info.EvalStatus.HasSideEffects),
OldMode(Info.EvalMode) {
if (Enabled &&
(Info.EvalMode == EvalInfo::EM_ConstantExpression ||
Info.EvalMode == EvalInfo::EM_ConstantExpressionUnevaluated))
Info.EvalMode = EvalInfo::EM_ConstantFold;
}
void keepDiagnostics() { Enabled = false; }
~FoldConstant() {
if (Enabled && HadNoPriorDiags && !Info.EvalStatus.Diag->empty() &&
!Info.EvalStatus.HasSideEffects)
Info.EvalStatus.Diag->clear();
Info.EvalMode = OldMode;
}
};
/// RAII object used to suppress diagnostics and side-effects from a
/// speculative evaluation.
class SpeculativeEvaluationRAII {
EvalInfo &Info;
Expr::EvalStatus Old;
public:
SpeculativeEvaluationRAII(EvalInfo &Info,
SmallVectorImpl<PartialDiagnosticAt> *NewDiag = nullptr)
: Info(Info), Old(Info.EvalStatus) {
Info.EvalStatus.Diag = NewDiag;
// If we're speculatively evaluating, we may have skipped over some
    // evaluations and missed a side effect.
Info.EvalStatus.HasSideEffects = true;
}
~SpeculativeEvaluationRAII() {
Info.EvalStatus = Old;
}
};
/// RAII object wrapping a full-expression or block scope, and handling
/// the ending of the lifetime of temporaries created within it.
template<bool IsFullExpression>
class ScopeRAII {
EvalInfo &Info;
unsigned OldStackSize;
public:
ScopeRAII(EvalInfo &Info)
: Info(Info), OldStackSize(Info.CleanupStack.size()) {}
~ScopeRAII() {
// Body moved to a static method to encourage the compiler to inline away
// instances of this class.
cleanup(Info, OldStackSize);
}
private:
static void cleanup(EvalInfo &Info, unsigned OldStackSize) {
unsigned NewEnd = OldStackSize;
for (unsigned I = OldStackSize, N = Info.CleanupStack.size();
I != N; ++I) {
if (IsFullExpression && Info.CleanupStack[I].isLifetimeExtended()) {
// Full-expression cleanup of a lifetime-extended temporary: nothing
// to do, just move this cleanup to the right place in the stack.
std::swap(Info.CleanupStack[I], Info.CleanupStack[NewEnd]);
++NewEnd;
} else {
// End the lifetime of the object.
Info.CleanupStack[I].endLifetime();
}
}
Info.CleanupStack.erase(Info.CleanupStack.begin() + NewEnd,
Info.CleanupStack.end());
}
};
typedef ScopeRAII<false> BlockScopeRAII;
typedef ScopeRAII<true> FullExpressionRAII;
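// Usage sketch: a compound statement is evaluated under a
//   BlockScopeRAII Scope(Info);
// so temporaries created inside it end their lifetime at the closing brace,
// while FullExpressionRAII instead slides lifetime-extended cleanups down
// the stack (see cleanup() above) so they outlive the full-expression.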
}
bool SubobjectDesignator::checkSubobject(EvalInfo &Info, const Expr *E,
CheckSubobjectKind CSK) {
if (Invalid)
return false;
if (isOnePastTheEnd()) {
Info.CCEDiag(E, diag::note_constexpr_past_end_subobject)
<< CSK;
setInvalid();
return false;
}
return true;
}
void SubobjectDesignator::diagnosePointerArithmetic(EvalInfo &Info,
const Expr *E, uint64_t N) {
if (MostDerivedPathLength == Entries.size() && MostDerivedArraySize)
Info.CCEDiag(E, diag::note_constexpr_array_index)
<< static_cast<int>(N) << /*array*/ 0
<< static_cast<unsigned>(MostDerivedArraySize);
else
Info.CCEDiag(E, diag::note_constexpr_array_index)
<< static_cast<int>(N) << /*non-array*/ 1;
setInvalid();
}
CallStackFrame::CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
const FunctionDecl *Callee, const LValue *This,
APValue *Arguments)
: Info(Info), Caller(Info.CurrentCall), CallLoc(CallLoc), Callee(Callee),
Index(Info.NextCallIndex++), This(This), Arguments(Arguments) {
Info.CurrentCall = this;
++Info.CallStackDepth;
}
CallStackFrame::~CallStackFrame() {
assert(Info.CurrentCall == this && "calls retired out of order");
--Info.CallStackDepth;
Info.CurrentCall = Caller;
}
APValue &CallStackFrame::createTemporary(const void *Key,
bool IsLifetimeExtended) {
APValue &Result = Temporaries[Key];
assert(Result.isUninit() && "temporary created multiple times");
Info.CleanupStack.push_back(Cleanup(&Result, IsLifetimeExtended));
return Result;
}
static void describeCall(CallStackFrame *Frame, raw_ostream &Out);
void EvalInfo::addCallStack(unsigned Limit) {
// Determine which calls to skip, if any.
unsigned ActiveCalls = CallStackDepth - 1;
unsigned SkipStart = ActiveCalls, SkipEnd = SkipStart;
if (Limit && Limit < ActiveCalls) {
SkipStart = Limit / 2 + Limit % 2;
SkipEnd = ActiveCalls - Limit / 2;
}
// Walk the call stack and add the diagnostics.
unsigned CallIdx = 0;
for (CallStackFrame *Frame = CurrentCall; Frame != &BottomFrame;
Frame = Frame->Caller, ++CallIdx) {
// Skip this call?
if (CallIdx >= SkipStart && CallIdx < SkipEnd) {
if (CallIdx == SkipStart) {
// Note that we're skipping calls.
addDiag(Frame->CallLoc, diag::note_constexpr_calls_suppressed)
<< unsigned(ActiveCalls - Limit);
}
continue;
}
SmallVector<char, 128> Buffer;
llvm::raw_svector_ostream Out(Buffer);
describeCall(Frame, Out);
addDiag(Frame->CallLoc, diag::note_constexpr_call_here) << Out.str();
}
}
namespace {
struct ComplexValue {
private:
bool IsInt;
public:
APSInt IntReal, IntImag;
APFloat FloatReal, FloatImag;
ComplexValue() : FloatReal(APFloat::Bogus), FloatImag(APFloat::Bogus) {}
void makeComplexFloat() { IsInt = false; }
bool isComplexFloat() const { return !IsInt; }
APFloat &getComplexFloatReal() { return FloatReal; }
APFloat &getComplexFloatImag() { return FloatImag; }
void makeComplexInt() { IsInt = true; }
bool isComplexInt() const { return IsInt; }
APSInt &getComplexIntReal() { return IntReal; }
APSInt &getComplexIntImag() { return IntImag; }
void moveInto(APValue &v) const {
if (isComplexFloat())
v = APValue(FloatReal, FloatImag);
else
v = APValue(IntReal, IntImag);
}
void setFrom(const APValue &v) {
assert(v.isComplexFloat() || v.isComplexInt());
if (v.isComplexFloat()) {
makeComplexFloat();
FloatReal = v.getComplexFloatReal();
FloatImag = v.getComplexFloatImag();
} else {
makeComplexInt();
IntReal = v.getComplexIntReal();
IntImag = v.getComplexIntImag();
}
}
};
struct LValue {
APValue::LValueBase Base;
CharUnits Offset;
unsigned CallIndex;
SubobjectDesignator Designator;
const APValue::LValueBase getLValueBase() const { return Base; }
CharUnits &getLValueOffset() { return Offset; }
const CharUnits &getLValueOffset() const { return Offset; }
unsigned getLValueCallIndex() const { return CallIndex; }
SubobjectDesignator &getLValueDesignator() { return Designator; }
const SubobjectDesignator &getLValueDesignator() const { return Designator;}
void moveInto(APValue &V) const {
if (Designator.Invalid)
V = APValue(Base, Offset, APValue::NoLValuePath(), CallIndex);
else
V = APValue(Base, Offset, Designator.Entries,
Designator.IsOnePastTheEnd, CallIndex);
}
void setFrom(ASTContext &Ctx, const APValue &V) {
assert(V.isLValue());
Base = V.getLValueBase();
Offset = V.getLValueOffset();
CallIndex = V.getLValueCallIndex();
Designator = SubobjectDesignator(Ctx, V);
}
void set(APValue::LValueBase B, unsigned I = 0) {
Base = B;
Offset = CharUnits::Zero();
CallIndex = I;
Designator = SubobjectDesignator(getType(B));
}
// Check that this LValue is not based on a null pointer. If it is, produce
// a diagnostic and mark the designator as invalid.
bool checkNullPointer(EvalInfo &Info, const Expr *E,
CheckSubobjectKind CSK) {
if (Designator.Invalid)
return false;
if (!Base) {
Info.CCEDiag(E, diag::note_constexpr_null_subobject)
<< CSK;
Designator.setInvalid();
return false;
}
return true;
}
// Check this LValue refers to an object. If not, set the designator to be
// invalid and emit a diagnostic.
bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK) {
// Outside C++11, do not build a designator referring to a subobject of
// any object: we won't use such a designator for anything.
if (!Info.getLangOpts().CPlusPlus11)
Designator.setInvalid();
return (CSK == CSK_ArrayToPointer || checkNullPointer(Info, E, CSK)) &&
Designator.checkSubobject(Info, E, CSK);
}
void addDecl(EvalInfo &Info, const Expr *E,
const Decl *D, bool Virtual = false) {
if (checkSubobject(Info, E, isa<FieldDecl>(D) ? CSK_Field : CSK_Base))
Designator.addDeclUnchecked(D, Virtual);
}
void addArray(EvalInfo &Info, const Expr *E, const ConstantArrayType *CAT) {
if (checkSubobject(Info, E, CSK_ArrayToPointer))
Designator.addArrayUnchecked(CAT);
}
void addComplex(EvalInfo &Info, const Expr *E, QualType EltTy, bool Imag) {
if (checkSubobject(Info, E, Imag ? CSK_Imag : CSK_Real))
Designator.addComplexUnchecked(EltTy, Imag);
}
void adjustIndex(EvalInfo &Info, const Expr *E, uint64_t N) {
if (N && checkNullPointer(Info, E, CSK_ArrayIndex))
Designator.adjustIndex(Info, E, N);
}
};
struct MemberPtr {
MemberPtr() {}
explicit MemberPtr(const ValueDecl *Decl) :
DeclAndIsDerivedMember(Decl, false), Path() {}
/// The member or (direct or indirect) field referred to by this member
/// pointer, or 0 if this is a null member pointer.
const ValueDecl *getDecl() const {
return DeclAndIsDerivedMember.getPointer();
}
/// Is this actually a member of some type derived from the relevant class?
bool isDerivedMember() const {
return DeclAndIsDerivedMember.getInt();
}
/// Get the class which the declaration actually lives in.
const CXXRecordDecl *getContainingRecord() const {
return cast<CXXRecordDecl>(
DeclAndIsDerivedMember.getPointer()->getDeclContext());
}
void moveInto(APValue &V) const {
V = APValue(getDecl(), isDerivedMember(), Path);
}
void setFrom(const APValue &V) {
assert(V.isMemberPointer());
DeclAndIsDerivedMember.setPointer(V.getMemberPointerDecl());
DeclAndIsDerivedMember.setInt(V.isMemberPointerToDerivedMember());
Path.clear();
ArrayRef<const CXXRecordDecl*> P = V.getMemberPointerPath();
Path.insert(Path.end(), P.begin(), P.end());
}
/// DeclAndIsDerivedMember - The member declaration, and a flag indicating
/// whether the member is a member of some class derived from the class type
/// of the member pointer.
llvm::PointerIntPair<const ValueDecl*, 1, bool> DeclAndIsDerivedMember;
/// Path - The path of base/derived classes from the member declaration's
/// class (exclusive) to the class type of the member pointer (inclusive).
SmallVector<const CXXRecordDecl*, 4> Path;
/// Perform a cast towards the class of the Decl (either up or down the
/// hierarchy).
bool castBack(const CXXRecordDecl *Class) {
assert(!Path.empty());
const CXXRecordDecl *Expected;
if (Path.size() >= 2)
Expected = Path[Path.size() - 2];
else
Expected = getContainingRecord();
if (Expected->getCanonicalDecl() != Class->getCanonicalDecl()) {
// C++11 [expr.static.cast]p12: In a conversion from (D::*) to (B::*),
// if B does not contain the original member and is not a base or
// derived class of the class containing the original member, the result
// of the cast is undefined.
// C++11 [conv.mem]p2 does not cover this case for a cast from (B::*) to
// (D::*). We consider that to be a language defect.
return false;
}
Path.pop_back();
return true;
}
/// Perform a base-to-derived member pointer cast.
bool castToDerived(const CXXRecordDecl *Derived) {
if (!getDecl())
return true;
if (!isDerivedMember()) {
Path.push_back(Derived);
return true;
}
if (!castBack(Derived))
return false;
if (Path.empty())
DeclAndIsDerivedMember.setInt(false);
return true;
}
/// Perform a derived-to-base member pointer cast.
bool castToBase(const CXXRecordDecl *Base) {
if (!getDecl())
return true;
if (Path.empty())
DeclAndIsDerivedMember.setInt(true);
if (isDerivedMember()) {
Path.push_back(Base);
return true;
}
return castBack(Base);
}
};
/// Compare two member pointers, which are assumed to be of the same type.
static bool operator==(const MemberPtr &LHS, const MemberPtr &RHS) {
if (!LHS.getDecl() || !RHS.getDecl())
return !LHS.getDecl() && !RHS.getDecl();
if (LHS.getDecl()->getCanonicalDecl() != RHS.getDecl()->getCanonicalDecl())
return false;
return LHS.Path == RHS.Path;
}
}
static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E);
static bool EvaluateInPlace(APValue &Result, EvalInfo &Info,
const LValue &This, const Expr *E,
bool AllowNonLiteralTypes = false);
static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info);
static bool EvaluatePointer(const Expr *E, LValue &Result, EvalInfo &Info);
static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result,
EvalInfo &Info);
static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info);
static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info);
static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result,
EvalInfo &Info);
static bool EvaluateFloat(const Expr *E, APFloat &Result, EvalInfo &Info);
static bool EvaluateComplex(const Expr *E, ComplexValue &Res, EvalInfo &Info);
static bool EvaluateAtomic(const Expr *E, APValue &Result, EvalInfo &Info);
//===----------------------------------------------------------------------===//
// Misc utilities
//===----------------------------------------------------------------------===//
/// Produce a string describing the given constexpr call.
static void describeCall(CallStackFrame *Frame, raw_ostream &Out) {
unsigned ArgIndex = 0;
bool IsMemberCall = isa<CXXMethodDecl>(Frame->Callee) &&
!isa<CXXConstructorDecl>(Frame->Callee) &&
cast<CXXMethodDecl>(Frame->Callee)->isInstance();
if (!IsMemberCall)
Out << *Frame->Callee << '(';
if (Frame->This && IsMemberCall) {
APValue Val;
Frame->This->moveInto(Val);
Val.printPretty(Out, Frame->Info.Ctx,
Frame->This->Designator.MostDerivedType);
// FIXME: Add parens around Val if needed.
Out << "->" << *Frame->Callee << '(';
IsMemberCall = false;
}
for (FunctionDecl::param_const_iterator I = Frame->Callee->param_begin(),
E = Frame->Callee->param_end(); I != E; ++I, ++ArgIndex) {
if (ArgIndex > (unsigned)IsMemberCall)
Out << ", ";
const ParmVarDecl *Param = *I;
const APValue &Arg = Frame->Arguments[ArgIndex];
Arg.printPretty(Out, Frame->Info.Ctx, Param->getType());
if (ArgIndex == 0 && IsMemberCall)
Out << "->" << *Frame->Callee << '(';
}
Out << ')';
}
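// Illustrative output: a frame for a free function called as 'f(1, 2)' is
// described as "f(1, 2)"; for an instance method the pretty-printed object
// appears first, roughly "obj->method(args)" (names hypothetical).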
/// Evaluate an expression to see if it had side-effects, and discard its
/// result.
/// \return \c true if the caller should keep evaluating.
static bool EvaluateIgnoredValue(EvalInfo &Info, const Expr *E) {
APValue Scratch;
if (!Evaluate(Scratch, Info, E))
// We don't need the value, but we might have skipped a side effect here.
return Info.noteSideEffect();
return true;
}
/// Sign- or zero-extend a value to 64 bits. If it's already 64 bits, just
/// return its existing value.
static int64_t getExtValue(const APSInt &Value) {
return Value.isSigned() ? Value.getSExtValue()
: static_cast<int64_t>(Value.getZExtValue());
}
/// Should this call expression be treated as a string literal?
static bool IsStringLiteralCall(const CallExpr *E) {
unsigned Builtin = E->getBuiltinCallee();
return (Builtin == Builtin::BI__builtin___CFStringMakeConstantString ||
Builtin == Builtin::BI__builtin___NSStringMakeConstantString);
}
static bool IsGlobalLValue(APValue::LValueBase B) {
// C++11 [expr.const]p3 An address constant expression is a prvalue core
// constant expression of pointer type that evaluates to...
// ... a null pointer value, or a prvalue core constant expression of type
// std::nullptr_t.
if (!B) return true;
if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
// ... the address of an object with static storage duration,
if (const VarDecl *VD = dyn_cast<VarDecl>(D))
return VD->hasGlobalStorage();
// ... the address of a function,
return isa<FunctionDecl>(D);
}
const Expr *E = B.get<const Expr*>();
switch (E->getStmtClass()) {
default:
return false;
case Expr::CompoundLiteralExprClass: {
const CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
return CLE->isFileScope() && CLE->isLValue();
}
case Expr::MaterializeTemporaryExprClass:
// A materialized temporary might have been lifetime-extended to static
// storage duration.
return cast<MaterializeTemporaryExpr>(E)->getStorageDuration() == SD_Static;
// A string literal has static storage duration.
case Expr::StringLiteralClass:
case Expr::PredefinedExprClass:
case Expr::ObjCStringLiteralClass:
case Expr::ObjCEncodeExprClass:
case Expr::CXXTypeidExprClass:
case Expr::CXXUuidofExprClass:
return true;
case Expr::CallExprClass:
return IsStringLiteralCall(cast<CallExpr>(E));
// For GCC compatibility, &&label has static storage duration.
case Expr::AddrLabelExprClass:
return true;
// A Block literal expression may be used as the initialization value for
// Block variables at global or local static scope.
case Expr::BlockExprClass:
return !cast<BlockExpr>(E)->getBlockDecl()->hasCaptures();
case Expr::ImplicitValueInitExprClass:
// FIXME:
// We can never form an lvalue with an implicit value initialization as its
// base through expression evaluation, so these only appear in one case: the
// implicit variable declaration we invent when checking whether a constexpr
// constructor can produce a constant expression. We must assume that such
// an expression might be a global lvalue.
return true;
}
}
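// Informal examples: the address of a global variable, a function, or a
// string literal is a global lvalue base; the address of a local variable
// or of a temporary not lifetime-extended to static storage is not, so the
// latter are rejected as address constants.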
static void NoteLValueLocation(EvalInfo &Info, APValue::LValueBase Base) {
assert(Base && "no location for a null lvalue");
const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
if (VD)
Info.Note(VD->getLocation(), diag::note_declared_at);
else
Info.Note(Base.get<const Expr*>()->getExprLoc(),
diag::note_constexpr_temporary_here);
}
/// Check that this reference or pointer core constant expression is a valid
/// value for an address or reference constant expression. Return true if we
/// can fold this expression, whether or not it's a constant expression.
static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
QualType Type, const LValue &LVal) {
bool IsReferenceType = Type->isReferenceType();
APValue::LValueBase Base = LVal.getLValueBase();
const SubobjectDesignator &Designator = LVal.getLValueDesignator();
// Check that the object is a global. Note that the fake 'this' object we
// manufacture when checking potential constant expressions is conservatively
// assumed to be global here.
if (!IsGlobalLValue(Base)) {
if (Info.getLangOpts().CPlusPlus11) {
const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
Info.Diag(Loc, diag::note_constexpr_non_global, 1)
<< IsReferenceType << !Designator.Entries.empty()
<< !!VD << VD;
NoteLValueLocation(Info, Base);
} else {
Info.Diag(Loc);
}
// Don't allow references to temporaries to escape.
return false;
}
assert((Info.checkingPotentialConstantExpression() ||
LVal.getLValueCallIndex() == 0) &&
"have call index for global lvalue");
if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>()) {
if (const VarDecl *Var = dyn_cast<const VarDecl>(VD)) {
// Check if this is a thread-local variable.
if (Var->getTLSKind())
return false;
// A dllimport variable never acts like a constant.
if (Var->hasAttr<DLLImportAttr>())
return false;
}
if (const auto *FD = dyn_cast<const FunctionDecl>(VD)) {
// __declspec(dllimport) must be handled very carefully:
// We must never initialize an expression with the thunk in C++.
// Doing otherwise would allow the same id-expression to yield
// different addresses for the same function in different translation
// units. However, this means that we must dynamically initialize the
// expression with the contents of the import address table at runtime.
//
// The C language has no notion of ODR; furthermore, it has no notion of
// dynamic initialization. This means that we are permitted to
// perform initialization with the address of the thunk.
if (Info.getLangOpts().CPlusPlus && FD->hasAttr<DLLImportAttr>())
return false;
}
}
// Allow address constant expressions to be past-the-end pointers. This is
// an extension: the standard requires them to point to an object.
if (!IsReferenceType)
return true;
// A reference constant expression must refer to an object.
if (!Base) {
// FIXME: diagnostic
Info.CCEDiag(Loc);
return true;
}
// Does this refer one past the end of some object?
if (!Designator.Invalid && Designator.isOnePastTheEnd()) {
const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
Info.Diag(Loc, diag::note_constexpr_past_end, 1)
<< !Designator.Entries.empty() << !!VD << VD;
NoteLValueLocation(Info, Base);
}
return true;
}
/// Check that this core constant expression is of literal type, and if not,
/// produce an appropriate diagnostic.
static bool CheckLiteralType(EvalInfo &Info, const Expr *E,
const LValue *This = nullptr) {
if (!E->isRValue() || E->getType()->isLiteralType(Info.Ctx))
return true;
// C++1y: A constant initializer for an object o [...] may also invoke
// constexpr constructors for o and its subobjects even if those objects
// are of non-literal class types.
if (Info.getLangOpts().CPlusPlus14 && This &&
Info.EvaluatingDecl == This->getLValueBase())
return true;
// Prvalue constant expressions must be of literal types.
if (Info.getLangOpts().CPlusPlus11)
Info.Diag(E, diag::note_constexpr_nonliteral)
<< E->getType();
else
Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
return false;
}
/// Check that this core constant expression value is a valid value for a
/// constant expression. If not, report an appropriate diagnostic. Does not
/// check that the expression is of literal type.
static bool CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc,
QualType Type, const APValue &Value) {
if (Value.isUninit()) {
Info.Diag(DiagLoc, diag::note_constexpr_uninitialized)
<< true << Type;
return false;
}
// We allow _Atomic(T) to be initialized from anything that T can be
// initialized from.
if (const AtomicType *AT = Type->getAs<AtomicType>())
Type = AT->getValueType();
// Core issue 1454: For a literal constant expression of array or class type,
// each subobject of its value shall have been initialized by a constant
// expression.
if (Value.isArray()) {
QualType EltTy = Type->castAsArrayTypeUnsafe()->getElementType();
for (unsigned I = 0, N = Value.getArrayInitializedElts(); I != N; ++I) {
if (!CheckConstantExpression(Info, DiagLoc, EltTy,
Value.getArrayInitializedElt(I)))
return false;
}
if (!Value.hasArrayFiller())
return true;
return CheckConstantExpression(Info, DiagLoc, EltTy,
Value.getArrayFiller());
}
if (Value.isUnion() && Value.getUnionField()) {
return CheckConstantExpression(Info, DiagLoc,
Value.getUnionField()->getType(),
Value.getUnionValue());
}
if (Value.isStruct()) {
RecordDecl *RD = Type->castAs<RecordType>()->getDecl();
if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
unsigned BaseIndex = 0;
for (CXXRecordDecl::base_class_const_iterator I = CD->bases_begin(),
End = CD->bases_end(); I != End; ++I, ++BaseIndex) {
if (!CheckConstantExpression(Info, DiagLoc, I->getType(),
Value.getStructBase(BaseIndex)))
return false;
}
}
for (const auto *I : RD->fields()) {
if (!CheckConstantExpression(Info, DiagLoc, I->getType(),
Value.getStructField(I->getFieldIndex())))
return false;
}
}
if (Value.isLValue()) {
LValue LVal;
LVal.setFrom(Info.Ctx, Value);
return CheckLValueConstantExpression(Info, DiagLoc, Type, LVal);
}
// Everything else is fine.
return true;
}
static const ValueDecl *GetLValueBaseDecl(const LValue &LVal) {
return LVal.Base.dyn_cast<const ValueDecl*>();
}
static bool IsLiteralLValue(const LValue &Value) {
if (Value.CallIndex)
return false;
const Expr *E = Value.Base.dyn_cast<const Expr*>();
return E && !isa<MaterializeTemporaryExpr>(E);
}
static bool IsWeakLValue(const LValue &Value) {
const ValueDecl *Decl = GetLValueBaseDecl(Value);
return Decl && Decl->isWeak();
}
static bool isZeroSized(const LValue &Value) {
const ValueDecl *Decl = GetLValueBaseDecl(Value);
if (Decl && isa<VarDecl>(Decl)) {
QualType Ty = Decl->getType();
if (Ty->isArrayType())
return Ty->isIncompleteType() ||
Decl->getASTContext().getTypeSize(Ty) == 0;
}
return false;
}
static bool EvalPointerValueAsBool(const APValue &Value, bool &Result) {
  // A null base expression indicates a null pointer. These are always
  // evaluatable, and they are true exactly when the offset is nonzero.
if (!Value.getLValueBase()) {
Result = !Value.getLValueOffset().isZero();
return true;
}
// We have a non-null base. These are generally known to be true, but if it's
// a weak declaration it can be null at runtime.
Result = true;
const ValueDecl *Decl = Value.getLValueBase().dyn_cast<const ValueDecl*>();
return !Decl || !Decl->isWeak();
}
static bool HandleConversionToBool(const APValue &Val, bool &Result) {
switch (Val.getKind()) {
case APValue::Uninitialized:
return false;
case APValue::Int:
Result = Val.getInt().getBoolValue();
return true;
case APValue::Float:
Result = !Val.getFloat().isZero();
return true;
case APValue::ComplexInt:
Result = Val.getComplexIntReal().getBoolValue() ||
Val.getComplexIntImag().getBoolValue();
return true;
case APValue::ComplexFloat:
Result = !Val.getComplexFloatReal().isZero() ||
!Val.getComplexFloatImag().isZero();
return true;
case APValue::LValue:
return EvalPointerValueAsBool(Val, Result);
case APValue::MemberPointer:
Result = Val.getMemberPointerDecl();
return true;
case APValue::Vector:
case APValue::Array:
case APValue::Struct:
case APValue::Union:
case APValue::AddrLabelDiff:
return false;
}
llvm_unreachable("unknown APValue kind");
}
static bool EvaluateAsBooleanCondition(const Expr *E, bool &Result,
EvalInfo &Info) {
assert(E->isRValue() && "missing lvalue-to-rvalue conv in bool condition");
APValue Val;
if (!Evaluate(Val, Info, E))
return false;
return HandleConversionToBool(Val, Result);
}
template<typename T>
static void HandleOverflow(EvalInfo &Info, const Expr *E,
const T &SrcValue, QualType DestType) {
Info.CCEDiag(E, diag::note_constexpr_overflow)
<< SrcValue << DestType;
// HLSL changes begin
if (Info.getLangOpts().HLSL)
Info.Ctx.getDiagnostics().Report(E->getExprLoc(),
diag::warn_hlsl_constexpr_overflow) << DestType;
// HLSL changes end
}
static bool HandleFloatToIntCast(EvalInfo &Info, const Expr *E,
QualType SrcType, const APFloat &Value,
QualType DestType, APSInt &Result) {
unsigned DestWidth = Info.Ctx.getIntWidth(DestType);
// Determine whether we are converting to unsigned or signed.
bool DestSigned = DestType->isSignedIntegerOrEnumerationType();
Result = APSInt(DestWidth, !DestSigned);
bool ignored;
if (Value.convertToInteger(Result, llvm::APFloat::rmTowardZero, &ignored)
& APFloat::opInvalidOp)
HandleOverflow(Info, E, Value, DestType);
return true;
}
static bool HandleFloatToFloatCast(EvalInfo &Info, const Expr *E,
QualType SrcType, QualType DestType,
APFloat &Result) {
APFloat Value = Result;
bool ignored;
if (Result.convert(Info.Ctx.getFloatTypeSemantics(DestType),
APFloat::rmNearestTiesToEven, &ignored)
& APFloat::opOverflow)
HandleOverflow(Info, E, Value, DestType);
return true;
}
static APSInt HandleIntToIntCast(EvalInfo &Info, const Expr *E,
QualType DestType, QualType SrcType,
APSInt &Value) {
unsigned DestWidth = Info.Ctx.getIntWidth(DestType);
APSInt Result = Value;
// Figure out if this is a truncate, extend or noop cast.
// If the input is signed, do a sign extend, noop, or truncate.
Result = Result.extOrTrunc(DestWidth);
Result.setIsUnsigned(DestType->isUnsignedIntegerOrEnumerationType());
return Result;
}
static bool HandleIntToFloatCast(EvalInfo &Info, const Expr *E,
QualType SrcType, const APSInt &Value,
QualType DestType, APFloat &Result) {
Result = APFloat(Info.Ctx.getFloatTypeSemantics(DestType), 1);
if (Result.convertFromAPInt(Value, Value.isSigned(),
APFloat::rmNearestTiesToEven)
& APFloat::opOverflow)
HandleOverflow(Info, E, Value, DestType);
return true;
}
static bool truncateBitfieldValue(EvalInfo &Info, const Expr *E,
APValue &Value, const FieldDecl *FD) {
assert(FD->isBitField() && "truncateBitfieldValue on non-bitfield");
if (!Value.isInt()) {
// Trying to store a pointer-cast-to-integer into a bitfield.
// FIXME: In this case, we should provide the diagnostic for casting
// a pointer to an integer.
assert(Value.isLValue() && "integral value neither int nor lvalue?");
Info.Diag(E);
return false;
}
APSInt &Int = Value.getInt();
unsigned OldBitWidth = Int.getBitWidth();
unsigned NewBitWidth = FD->getBitWidthValue(Info.Ctx);
if (NewBitWidth < OldBitWidth)
Int = Int.trunc(NewBitWidth).extend(OldBitWidth);
return true;
}
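// Worked example: storing 7 into a signed 2-bit bitfield truncates to the
// low bits 0b11 and then sign-extends back, so the field reads as -1.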
static bool EvalAndBitcastToAPInt(EvalInfo &Info, const Expr *E,
llvm::APInt &Res) {
APValue SVal;
if (!Evaluate(SVal, Info, E))
return false;
if (SVal.isInt()) {
Res = SVal.getInt();
return true;
}
if (SVal.isFloat()) {
Res = SVal.getFloat().bitcastToAPInt();
return true;
}
if (SVal.isVector()) {
QualType VecTy = E->getType();
unsigned VecSize = Info.Ctx.getTypeSize(VecTy);
QualType EltTy = VecTy->castAs<VectorType>()->getElementType();
unsigned EltSize = Info.Ctx.getTypeSize(EltTy);
bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian();
Res = llvm::APInt::getNullValue(VecSize);
for (unsigned i = 0; i < SVal.getVectorLength(); i++) {
APValue &Elt = SVal.getVectorElt(i);
llvm::APInt EltAsInt;
if (Elt.isInt()) {
EltAsInt = Elt.getInt();
} else if (Elt.isFloat()) {
EltAsInt = Elt.getFloat().bitcastToAPInt();
} else {
// Don't try to handle vectors of anything other than int or float
// (not sure if it's possible to hit this case).
Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
return false;
}
unsigned BaseEltSize = EltAsInt.getBitWidth();
if (BigEndian)
Res |= EltAsInt.zextOrTrunc(VecSize).rotr(i*EltSize+BaseEltSize);
else
Res |= EltAsInt.zextOrTrunc(VecSize).rotl(i*EltSize);
}
return true;
}
// Give up if the input isn't an int, float, or vector. For example, we
// reject "(v4i16)(intptr_t)&a".
Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
return false;
}
/// Perform the given integer operation, which is known to need at most BitWidth
/// bits, and check for overflow in the original type (if that type was not an
/// unsigned type).
template<typename Operation>
static APSInt CheckedIntArithmetic(EvalInfo &Info, const Expr *E,
const APSInt &LHS, const APSInt &RHS,
unsigned BitWidth, Operation Op) {
if (LHS.isUnsigned())
return Op(LHS, RHS);
APSInt Value(Op(LHS.extend(BitWidth), RHS.extend(BitWidth)), false);
APSInt Result = Value.trunc(LHS.getBitWidth());
if (Result.extend(BitWidth) != Value) {
if (Info.checkingForOverflow())
Info.Ctx.getDiagnostics().Report(E->getExprLoc(),
diag::warn_integer_constant_overflow)
<< Result.toString(10) << E->getType();
else
HandleOverflow(Info, E, Value, E->getType());
}
return Result;
}
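// Worked example (32-bit int): for 'INT_MAX + 1' the sum is formed in 33
// bits, truncated back to 32 bits (giving INT_MIN), and since re-extending
// INT_MIN does not reproduce the 33-bit value, the overflow is reported
// (as a warning when only checking for overflow, otherwise via
// HandleOverflow).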
/// Perform the given binary integer operation.
static bool handleIntIntBinOp(EvalInfo &Info, const Expr *E, const APSInt &LHS,
BinaryOperatorKind Opcode, APSInt RHS,
APSInt &Result) {
switch (Opcode) {
default:
Info.Diag(E);
return false;
case BO_Mul:
Result = CheckedIntArithmetic(Info, E, LHS, RHS, LHS.getBitWidth() * 2,
std::multiplies<APSInt>());
return true;
case BO_Add:
Result = CheckedIntArithmetic(Info, E, LHS, RHS, LHS.getBitWidth() + 1,
std::plus<APSInt>());
return true;
case BO_Sub:
Result = CheckedIntArithmetic(Info, E, LHS, RHS, LHS.getBitWidth() + 1,
std::minus<APSInt>());
return true;
case BO_And: Result = LHS & RHS; return true;
case BO_Xor: Result = LHS ^ RHS; return true;
case BO_Or: Result = LHS | RHS; return true;
case BO_Div:
case BO_Rem:
if (RHS == 0) {
Info.Diag(E, diag::note_expr_divide_by_zero);
return false;
}
// Check for overflow case: INT_MIN / -1 or INT_MIN % -1.
if (RHS.isNegative() && RHS.isAllOnesValue() &&
LHS.isSigned() && LHS.isMinSignedValue())
HandleOverflow(Info, E, -LHS.extend(LHS.getBitWidth() + 1), E->getType());
Result = (Opcode == BO_Rem ? LHS % RHS : LHS / RHS);
return true;
case BO_Shl: {
if (Info.getLangOpts().OpenCL)
// OpenCL 6.3j: shift values are effectively % word size of LHS.
RHS &= APSInt(llvm::APInt(RHS.getBitWidth(),
static_cast<uint64_t>(LHS.getBitWidth() - 1)),
RHS.isUnsigned());
else if (RHS.isSigned() && RHS.isNegative()) {
// During constant-folding, a negative shift is an opposite shift. Such
// a shift is not a constant expression.
Info.CCEDiag(E, diag::note_constexpr_negative_shift) << RHS;
RHS = -RHS;
goto shift_right;
}
shift_left:
// C++11 [expr.shift]p1: Shift width must be less than the bit width of
// the shifted type.
unsigned SA = (unsigned) RHS.getLimitedValue(LHS.getBitWidth()-1);
if (SA != RHS) {
Info.CCEDiag(E, diag::note_constexpr_large_shift)
<< RHS << E->getType() << LHS.getBitWidth();
} else if (LHS.isSigned()) {
// C++11 [expr.shift]p2: A signed left shift must have a non-negative
// operand, and must not overflow the corresponding unsigned type.
if (LHS.isNegative())
Info.CCEDiag(E, diag::note_constexpr_lshift_of_negative) << LHS;
else if (LHS.countLeadingZeros() < SA)
Info.CCEDiag(E, diag::note_constexpr_lshift_discards);
}
Result = LHS << SA;
return true;
}
case BO_Shr: {
if (Info.getLangOpts().OpenCL)
// OpenCL 6.3j: shift values are effectively % word size of LHS.
RHS &= APSInt(llvm::APInt(RHS.getBitWidth(),
static_cast<uint64_t>(LHS.getBitWidth() - 1)),
RHS.isUnsigned());
else if (RHS.isSigned() && RHS.isNegative()) {
// During constant-folding, a negative shift is an opposite shift. Such a
// shift is not a constant expression.
Info.CCEDiag(E, diag::note_constexpr_negative_shift) << RHS;
RHS = -RHS;
goto shift_left;
}
shift_right:
// C++11 [expr.shift]p1: Shift width must be less than the bit width of the
// shifted type.
unsigned SA = (unsigned) RHS.getLimitedValue(LHS.getBitWidth()-1);
if (SA != RHS)
Info.CCEDiag(E, diag::note_constexpr_large_shift)
<< RHS << E->getType() << LHS.getBitWidth();
Result = LHS >> SA;
return true;
}
case BO_LT: Result = LHS < RHS; return true;
case BO_GT: Result = LHS > RHS; return true;
case BO_LE: Result = LHS <= RHS; return true;
case BO_GE: Result = LHS >= RHS; return true;
case BO_EQ: Result = LHS == RHS; return true;
case BO_NE: Result = LHS != RHS; return true;
}
}
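// A few illustrative foldings for the cases above, assuming a 32-bit 'int':
//
//   1 << 35       // SA is clamped to 31 and note_constexpr_large_shift fires
//   (-1) << 3     // note_constexpr_lshift_of_negative (C++11 rule)
//   1 << -2       // note_constexpr_negative_shift, then evaluated as 1 >> 2
//   INT_MIN / -1  // caught by the explicit overflow check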
/// Perform the given binary floating-point operation, in-place, on LHS.
static bool handleFloatFloatBinOp(EvalInfo &Info, const Expr *E,
APFloat &LHS, BinaryOperatorKind Opcode,
const APFloat &RHS) {
switch (Opcode) {
default:
Info.Diag(E);
return false;
case BO_Mul:
LHS.multiply(RHS, APFloat::rmNearestTiesToEven);
break;
case BO_Add:
LHS.add(RHS, APFloat::rmNearestTiesToEven);
break;
case BO_Sub:
LHS.subtract(RHS, APFloat::rmNearestTiesToEven);
break;
case BO_Div:
LHS.divide(RHS, APFloat::rmNearestTiesToEven);
break;
}
if (LHS.isInfinity() || LHS.isNaN())
Info.CCEDiag(E, diag::note_constexpr_float_arithmetic) << LHS.isNaN();
return true;
}
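// For example, folding 'constexpr double d = 1e308 * 10.0;' overflows to
// +infinity under round-to-nearest, so the check above reports it as invalid
// in contexts that require a core constant expression.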
/// Cast an lvalue referring to a base subobject to a derived class, by
/// truncating the lvalue's path to the given length.
static bool CastToDerivedClass(EvalInfo &Info, const Expr *E, LValue &Result,
const RecordDecl *TruncatedType,
unsigned TruncatedElements) {
SubobjectDesignator &D = Result.Designator;
// Check we actually point to a derived class object.
if (TruncatedElements == D.Entries.size())
return true;
assert(TruncatedElements >= D.MostDerivedPathLength &&
"not casting to a derived class");
if (!Result.checkSubobject(Info, E, CSK_Derived))
return false;
// Truncate the path to the subobject, and remove any derived-to-base offsets.
const RecordDecl *RD = TruncatedType;
for (unsigned I = TruncatedElements, N = D.Entries.size(); I != N; ++I) {
if (RD->isInvalidDecl()) return false;
const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
const CXXRecordDecl *Base = getAsBaseClass(D.Entries[I]);
if (isVirtualBaseClass(D.Entries[I]))
Result.Offset -= Layout.getVBaseClassOffset(Base);
else
Result.Offset -= Layout.getBaseClassOffset(Base);
RD = Base;
}
D.Entries.resize(TruncatedElements);
return true;
}
static bool HandleLValueDirectBase(EvalInfo &Info, const Expr *E, LValue &Obj,
const CXXRecordDecl *Derived,
const CXXRecordDecl *Base,
const ASTRecordLayout *RL = nullptr) {
if (!RL) {
if (Derived->isInvalidDecl()) return false;
RL = &Info.Ctx.getASTRecordLayout(Derived);
}
Obj.getLValueOffset() += RL->getBaseClassOffset(Base);
Obj.addDecl(Info, E, Base, /*Virtual*/ false);
return true;
}
static bool HandleLValueBase(EvalInfo &Info, const Expr *E, LValue &Obj,
const CXXRecordDecl *DerivedDecl,
const CXXBaseSpecifier *Base) {
const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl();
if (!Base->isVirtual())
return HandleLValueDirectBase(Info, E, Obj, DerivedDecl, BaseDecl);
SubobjectDesignator &D = Obj.Designator;
if (D.Invalid)
return false;
// Extract most-derived object and corresponding type.
DerivedDecl = D.MostDerivedType->getAsCXXRecordDecl();
if (!CastToDerivedClass(Info, E, Obj, DerivedDecl, D.MostDerivedPathLength))
return false;
// Find the virtual base class.
if (DerivedDecl->isInvalidDecl()) return false;
const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(DerivedDecl);
Obj.getLValueOffset() += Layout.getVBaseClassOffset(BaseDecl);
Obj.addDecl(Info, E, BaseDecl, /*Virtual*/ true);
return true;
}
static bool HandleLValueBasePath(EvalInfo &Info, const CastExpr *E,
QualType Type, LValue &Result) {
for (CastExpr::path_const_iterator PathI = E->path_begin(),
PathE = E->path_end();
PathI != PathE; ++PathI) {
if (!HandleLValueBase(Info, E, Result, Type->getAsCXXRecordDecl(),
*PathI))
return false;
Type = (*PathI)->getType();
}
return true;
}
/// Update LVal to refer to the given field, which must be a member of the type
/// currently described by LVal.
static bool HandleLValueMember(EvalInfo &Info, const Expr *E, LValue &LVal,
const FieldDecl *FD,
const ASTRecordLayout *RL = nullptr) {
if (!RL) {
if (FD->getParent()->isInvalidDecl()) return false;
RL = &Info.Ctx.getASTRecordLayout(FD->getParent());
}
unsigned I = FD->getFieldIndex();
LVal.Offset += Info.Ctx.toCharUnitsFromBits(RL->getFieldOffset(I));
LVal.addDecl(Info, E, FD);
return true;
}
/// Update LVal to refer to the given indirect field.
static bool HandleLValueIndirectMember(EvalInfo &Info, const Expr *E,
LValue &LVal,
const IndirectFieldDecl *IFD) {
for (const auto *C : IFD->chain())
if (!HandleLValueMember(Info, E, LVal, cast<FieldDecl>(C)))
return false;
return true;
}
/// Get the size of the given type in char units.
static bool HandleSizeof(EvalInfo &Info, SourceLocation Loc,
QualType Type, CharUnits &Size) {
// sizeof(void), __alignof__(void), sizeof(function) = 1 as a gcc
// extension.
if (Type->isVoidType() || Type->isFunctionType()) {
Size = CharUnits::One();
return true;
}
if (!Type->isConstantSizeType()) {
    // sizeof(vla) is not a constant expression: C99 6.5.3.4p2.
// FIXME: Better diagnostic.
Info.Diag(Loc);
return false;
}
Size = Info.Ctx.getTypeSizeInChars(Type);
return true;
}
/// Update a pointer value to model pointer arithmetic.
/// \param Info - Information about the ongoing evaluation.
/// \param E - The expression being evaluated, for diagnostic purposes.
/// \param LVal - The pointer value to be updated.
/// \param EltTy - The pointee type represented by LVal.
/// \param Adjustment - The adjustment, in objects of type EltTy, to add.
static bool HandleLValueArrayAdjustment(EvalInfo &Info, const Expr *E,
LValue &LVal, QualType EltTy,
int64_t Adjustment) {
CharUnits SizeOfPointee;
if (!HandleSizeof(Info, E->getExprLoc(), EltTy, SizeOfPointee))
return false;
// Compute the new offset in the appropriate width.
LVal.Offset += Adjustment * SizeOfPointee;
LVal.adjustIndex(Info, E, Adjustment);
return true;
}
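// For example, given 'int a[4];' and assuming a 4-byte 'int', adjusting an
// lvalue that designates a[1] by +2 adds 8 to the character offset and moves
// the designator's array index from 1 to 3, still within bounds.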
/// Update an lvalue to refer to a component of a complex number.
/// \param Info - Information about the ongoing evaluation.
/// \param LVal - The lvalue to be updated.
/// \param EltTy - The complex number's component type.
/// \param Imag - False for the real component, true for the imaginary.
static bool HandleLValueComplexElement(EvalInfo &Info, const Expr *E,
LValue &LVal, QualType EltTy,
bool Imag) {
if (Imag) {
CharUnits SizeOfComponent;
if (!HandleSizeof(Info, E->getExprLoc(), EltTy, SizeOfComponent))
return false;
LVal.Offset += SizeOfComponent;
}
LVal.addComplex(Info, E, EltTy, Imag);
return true;
}
/// Try to evaluate the initializer for a variable declaration.
///
/// \param Info Information about the ongoing evaluation.
/// \param E An expression to be used when printing diagnostics.
/// \param VD The variable whose initializer should be obtained.
/// \param Frame The frame in which the variable was created. Must be null
/// if this variable is not local to the evaluation.
/// \param Result Filled in with a pointer to the value of the variable.
static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
const VarDecl *VD, CallStackFrame *Frame,
APValue *&Result) {
// If this is a parameter to an active constexpr function call, perform
// argument substitution.
if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD)) {
// Assume arguments of a potential constant expression are unknown
// constant expressions.
if (Info.checkingPotentialConstantExpression())
return false;
if (!Frame || !Frame->Arguments) {
Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
return false;
}
Result = &Frame->Arguments[PVD->getFunctionScopeIndex()];
return true;
}
// If this is a local variable, dig out its value.
if (Frame) {
Result = Frame->getTemporary(VD);
assert(Result && "missing value for local variable");
return true;
}
  // Dig out the initializer, and use the declaration to which it's attached.
const Expr *Init = VD->getAnyInitializer(VD);
if (!Init || Init->isValueDependent()) {
// If we're checking a potential constant expression, the variable could be
// initialized later.
if (!Info.checkingPotentialConstantExpression())
Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
return false;
}
// If we're currently evaluating the initializer of this declaration, use that
// in-flight value.
if (Info.EvaluatingDecl.dyn_cast<const ValueDecl*>() == VD) {
Result = Info.EvaluatingDeclValue;
return true;
}
// Never evaluate the initializer of a weak variable. We can't be sure that
// this is the definition which will be used.
if (VD->isWeak()) {
Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
return false;
}
// HLSL Change Begin - External variable is in cbuffer, cannot use as immediate.
if (VD->hasExternalFormalLinkage() &&
!isa<EnumConstantDecl>(VD))
return false;
// HLSL Change End.
// Check that we can fold the initializer. In C++, we will have already done
// this in the cases where it matters for conformance.
SmallVector<PartialDiagnosticAt, 8> Notes;
if (!VD->evaluateValue(Notes)) {
Info.Diag(E, diag::note_constexpr_var_init_non_constant,
Notes.size() + 1) << VD;
Info.Note(VD->getLocation(), diag::note_declared_at);
Info.addNotes(Notes);
return false;
} else if (!VD->checkInitIsICE()) {
Info.CCEDiag(E, diag::note_constexpr_var_init_non_constant,
Notes.size() + 1) << VD;
Info.Note(VD->getLocation(), diag::note_declared_at);
Info.addNotes(Notes);
}
Result = VD->getEvaluatedValue();
return true;
}
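// To illustrate the main paths above, consider:
//
//   constexpr int f(int n) { return n; }  // 'n' is read from the active
//                                         // frame's argument slots
//   constexpr int k = f(2);               // later uses of 'k' go through
//                                         // VD->evaluateValue()
//
// whereas a weak variable, or (for HLSL) one with external formal linkage,
// is rejected above without consulting its initializer.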
static bool IsConstNonVolatile(QualType T) {
Qualifiers Quals = T.getQualifiers();
return Quals.hasConst() && !Quals.hasVolatile();
}
/// Get the base index of the given base class within an APValue representing
/// the given derived class.
static unsigned getBaseIndex(const CXXRecordDecl *Derived,
const CXXRecordDecl *Base) {
Base = Base->getCanonicalDecl();
unsigned Index = 0;
for (CXXRecordDecl::base_class_const_iterator I = Derived->bases_begin(),
E = Derived->bases_end(); I != E; ++I, ++Index) {
if (I->getType()->getAsCXXRecordDecl()->getCanonicalDecl() == Base)
return Index;
}
llvm_unreachable("base class missing from derived class's bases list");
}
/// Extract the value of a character from a string literal.
static APSInt extractStringLiteralCharacter(EvalInfo &Info, const Expr *Lit,
uint64_t Index) {
// FIXME: Support ObjCEncodeExpr, MakeStringConstant
if (auto PE = dyn_cast<PredefinedExpr>(Lit))
Lit = PE->getFunctionName();
const StringLiteral *S = cast<StringLiteral>(Lit);
const ConstantArrayType *CAT =
Info.Ctx.getAsConstantArrayType(S->getType());
assert(CAT && "string literal isn't an array");
QualType CharType = CAT->getElementType();
assert(CharType->isIntegerType() && "unexpected character type");
APSInt Value(S->getCharByteWidth() * Info.Ctx.getCharWidth(),
CharType->isUnsignedIntegerType());
if (Index < S->getLength())
Value = S->getCodeUnit(Index);
return Value;
}
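// For example, extracting index 2 from "hi" (whose array type is char[3])
// falls outside S->getLength() and yields the zero-valued APSInt, modeling
// the implicit null terminator and any remaining zero fill.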
// Expand a string literal into an array of characters.
static void expandStringLiteral(EvalInfo &Info, const Expr *Lit,
APValue &Result) {
const StringLiteral *S = cast<StringLiteral>(Lit);
const ConstantArrayType *CAT =
Info.Ctx.getAsConstantArrayType(S->getType());
assert(CAT && "string literal isn't an array");
QualType CharType = CAT->getElementType();
assert(CharType->isIntegerType() && "unexpected character type");
unsigned Elts = CAT->getSize().getZExtValue();
Result = APValue(APValue::UninitArray(),
std::min(S->getLength(), Elts), Elts);
APSInt Value(S->getCharByteWidth() * Info.Ctx.getCharWidth(),
CharType->isUnsignedIntegerType());
if (Result.hasArrayFiller())
Result.getArrayFiller() = APValue(Value);
for (unsigned I = 0, N = Result.getArrayInitializedElts(); I != N; ++I) {
Value = S->getCodeUnit(I);
Result.getArrayInitializedElt(I) = APValue(Value);
}
}
// Expand an array so that it has more than Index filled elements.
static void expandArray(APValue &Array, unsigned Index) {
unsigned Size = Array.getArraySize();
assert(Index < Size);
// Always at least double the number of elements for which we store a value.
unsigned OldElts = Array.getArrayInitializedElts();
unsigned NewElts = std::max(Index+1, OldElts * 2);
NewElts = std::min(Size, std::max(NewElts, 8u));
// Copy the data across.
APValue NewValue(APValue::UninitArray(), NewElts, Size);
for (unsigned I = 0; I != OldElts; ++I)
NewValue.getArrayInitializedElt(I).swap(Array.getArrayInitializedElt(I));
for (unsigned I = OldElts; I != NewElts; ++I)
NewValue.getArrayInitializedElt(I) = Array.getArrayFiller();
if (NewValue.hasArrayFiller())
NewValue.getArrayFiller() = Array.getArrayFiller();
Array.swap(NewValue);
}
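// For example, expanding a 100-element array that currently stores 3
// initialized elements so that index 10 is filled gives
// NewElts = min(100, max(max(10+1, 3*2), 8)) = 11: the 3 existing elements
// are moved over and elements 3 through 10 are copied from the filler.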
/// Determine whether a type would actually be read by an lvalue-to-rvalue
/// conversion. If it's of class type, we may assume that the copy operation
/// is trivial. Note that this is always true for a union type with fields
/// (because the copy always "reads" the active member) and for any
/// non-class type.
static bool isReadByLvalueToRvalueConversion(QualType T) {
CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
if (!RD || (RD->isUnion() && !RD->field_empty()))
return true;
if (RD->isEmpty())
return false;
for (auto *Field : RD->fields())
if (isReadByLvalueToRvalueConversion(Field->getType()))
return true;
for (auto &BaseSpec : RD->bases())
if (isReadByLvalueToRvalueConversion(BaseSpec.getType()))
return true;
return false;
}
/// Diagnose an attempt to read from any unreadable field within the specified
/// type, which might be a class type.
static bool diagnoseUnreadableFields(EvalInfo &Info, const Expr *E,
QualType T) {
CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
if (!RD)
return false;
if (!RD->hasMutableFields())
return false;
for (auto *Field : RD->fields()) {
// If we're actually going to read this field in some way, then it can't
// be mutable. If we're in a union, then assigning to a mutable field
// (even an empty one) can change the active member, so that's not OK.
// FIXME: Add core issue number for the union case.
if (Field->isMutable() &&
(RD->isUnion() || isReadByLvalueToRvalueConversion(Field->getType()))) {
Info.Diag(E, diag::note_constexpr_ltor_mutable, 1) << Field;
Info.Note(Field->getLocation(), diag::note_declared_at);
return true;
}
if (diagnoseUnreadableFields(Info, E, Field->getType()))
return true;
}
for (auto &BaseSpec : RD->bases())
if (diagnoseUnreadableFields(Info, E, BaseSpec.getType()))
return true;
// All mutable fields were empty, and thus not actually read.
return false;
}
/// Kinds of access we can perform on an object, for diagnostics.
enum AccessKinds {
AK_Read,
AK_Assign,
AK_Increment,
AK_Decrement
};
/// A handle to a complete object (an object that is not a subobject of
/// another object).
struct CompleteObject {
/// The value of the complete object.
APValue *Value;
/// The type of the complete object.
QualType Type;
CompleteObject() : Value(nullptr) {}
CompleteObject(APValue *Value, QualType Type)
: Value(Value), Type(Type) {
assert(Value && "missing value for complete object");
}
explicit operator bool() const { return Value; }
};
/// Find the designated sub-object of an rvalue.
template<typename SubobjectHandler>
typename SubobjectHandler::result_type
findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
const SubobjectDesignator &Sub, SubobjectHandler &handler) {
if (Sub.Invalid)
// A diagnostic will have already been produced.
return handler.failed();
if (Sub.isOnePastTheEnd()) {
if (Info.getLangOpts().CPlusPlus11)
Info.Diag(E, diag::note_constexpr_access_past_end)
<< handler.AccessKind;
else
Info.Diag(E);
return handler.failed();
}
APValue *O = Obj.Value;
QualType ObjType = Obj.Type;
const FieldDecl *LastField = nullptr;
// Walk the designator's path to find the subobject.
for (unsigned I = 0, N = Sub.Entries.size(); /**/; ++I) {
if (O->isUninit()) {
if (!Info.checkingPotentialConstantExpression())
Info.Diag(E, diag::note_constexpr_access_uninit) << handler.AccessKind;
return handler.failed();
}
if (I == N) {
// If we are reading an object of class type, there may still be more
// things we need to check: if there are any mutable subobjects, we
// cannot perform this read. (This only happens when performing a trivial
// copy or assignment.)
if (ObjType->isRecordType() && handler.AccessKind == AK_Read &&
diagnoseUnreadableFields(Info, E, ObjType))
return handler.failed();
if (!handler.found(*O, ObjType))
return false;
// If we modified a bit-field, truncate it to the right width.
if (handler.AccessKind != AK_Read &&
LastField && LastField->isBitField() &&
!truncateBitfieldValue(Info, E, *O, LastField))
return false;
return true;
}
LastField = nullptr;
if (ObjType->isArrayType()) {
// Next subobject is an array element.
const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(ObjType);
assert(CAT && "vla in literal type?");
uint64_t Index = Sub.Entries[I].ArrayIndex;
if (CAT->getSize().ule(Index)) {
// Note, it should not be possible to form a pointer with a valid
// designator which points more than one past the end of the array.
if (Info.getLangOpts().CPlusPlus11)
Info.Diag(E, diag::note_constexpr_access_past_end)
<< handler.AccessKind;
else
Info.Diag(E);
return handler.failed();
}
ObjType = CAT->getElementType();
// An array object is represented as either an Array APValue or as an
// LValue which refers to a string literal.
if (O->isLValue()) {
assert(I == N - 1 && "extracting subobject of character?");
assert(!O->hasLValuePath() || O->getLValuePath().empty());
if (handler.AccessKind != AK_Read)
expandStringLiteral(Info, O->getLValueBase().get<const Expr *>(),
*O);
else
return handler.foundString(*O, ObjType, Index);
}
if (O->getArrayInitializedElts() > Index)
O = &O->getArrayInitializedElt(Index);
else if (handler.AccessKind != AK_Read) {
expandArray(*O, Index);
O = &O->getArrayInitializedElt(Index);
} else
O = &O->getArrayFiller();
} else if (ObjType->isAnyComplexType()) {
// Next subobject is a complex number.
uint64_t Index = Sub.Entries[I].ArrayIndex;
if (Index > 1) {
if (Info.getLangOpts().CPlusPlus11)
Info.Diag(E, diag::note_constexpr_access_past_end)
<< handler.AccessKind;
else
Info.Diag(E);
return handler.failed();
}
bool WasConstQualified = ObjType.isConstQualified();
ObjType = ObjType->castAs<ComplexType>()->getElementType();
if (WasConstQualified)
ObjType.addConst();
assert(I == N - 1 && "extracting subobject of scalar?");
if (O->isComplexInt()) {
return handler.found(Index ? O->getComplexIntImag()
: O->getComplexIntReal(), ObjType);
} else {
assert(O->isComplexFloat());
return handler.found(Index ? O->getComplexFloatImag()
: O->getComplexFloatReal(), ObjType);
}
} else if (const FieldDecl *Field = getAsField(Sub.Entries[I])) {
if (Field->isMutable() && handler.AccessKind == AK_Read) {
Info.Diag(E, diag::note_constexpr_ltor_mutable, 1)
<< Field;
Info.Note(Field->getLocation(), diag::note_declared_at);
return handler.failed();
}
// Next subobject is a class, struct or union field.
RecordDecl *RD = ObjType->castAs<RecordType>()->getDecl();
if (RD->isUnion()) {
const FieldDecl *UnionField = O->getUnionField();
if (!UnionField ||
UnionField->getCanonicalDecl() != Field->getCanonicalDecl()) {
Info.Diag(E, diag::note_constexpr_access_inactive_union_member)
<< handler.AccessKind << Field << !UnionField << UnionField;
return handler.failed();
}
O = &O->getUnionValue();
} else
O = &O->getStructField(Field->getFieldIndex());
bool WasConstQualified = ObjType.isConstQualified();
ObjType = Field->getType();
if (WasConstQualified && !Field->isMutable())
ObjType.addConst();
if (ObjType.isVolatileQualified()) {
if (Info.getLangOpts().CPlusPlus) {
// FIXME: Include a description of the path to the volatile subobject.
Info.Diag(E, diag::note_constexpr_access_volatile_obj, 1)
<< handler.AccessKind << 2 << Field;
Info.Note(Field->getLocation(), diag::note_declared_at);
} else {
Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
}
return handler.failed();
}
LastField = Field;
} else {
// Next subobject is a base class.
const CXXRecordDecl *Derived = ObjType->getAsCXXRecordDecl();
const CXXRecordDecl *Base = getAsBaseClass(Sub.Entries[I]);
O = &O->getStructBase(getBaseIndex(Derived, Base));
bool WasConstQualified = ObjType.isConstQualified();
ObjType = Info.Ctx.getRecordType(Base);
if (WasConstQualified)
ObjType.addConst();
}
}
}
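// As a sketch of the walk above, given
//   struct S { int a[2]; };
//   constexpr S s = {{1, 2}};
// a designator of {field 'a', array index 1} steps from the struct value
// into the field, then into array element 1, and finally hands the APSInt
// value 2 to the handler with type 'const int'.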
namespace {
struct ExtractSubobjectHandler {
EvalInfo &Info;
APValue &Result;
static const AccessKinds AccessKind = AK_Read;
typedef bool result_type;
bool failed() { return false; }
bool found(APValue &Subobj, QualType SubobjType) {
Result = Subobj;
return true;
}
bool found(APSInt &Value, QualType SubobjType) {
Result = APValue(Value);
return true;
}
bool found(APFloat &Value, QualType SubobjType) {
Result = APValue(Value);
return true;
}
bool foundString(APValue &Subobj, QualType SubobjType, uint64_t Character) {
Result = APValue(extractStringLiteralCharacter(
Info, Subobj.getLValueBase().get<const Expr *>(), Character));
return true;
}
};
} // end anonymous namespace
const AccessKinds ExtractSubobjectHandler::AccessKind;
/// Extract the designated sub-object of an rvalue.
static bool extractSubobject(EvalInfo &Info, const Expr *E,
const CompleteObject &Obj,
const SubobjectDesignator &Sub,
APValue &Result) {
ExtractSubobjectHandler Handler = { Info, Result };
return findSubobject(Info, E, Obj, Sub, Handler);
}
namespace {
struct ModifySubobjectHandler {
EvalInfo &Info;
APValue &NewVal;
const Expr *E;
typedef bool result_type;
static const AccessKinds AccessKind = AK_Assign;
bool checkConst(QualType QT) {
// Assigning to a const object has undefined behavior.
if (QT.isConstQualified()) {
Info.Diag(E, diag::note_constexpr_modify_const_type) << QT;
return false;
}
return true;
}
bool failed() { return false; }
bool found(APValue &Subobj, QualType SubobjType) {
if (!checkConst(SubobjType))
return false;
// We've been given ownership of NewVal, so just swap it in.
Subobj.swap(NewVal);
return true;
}
bool found(APSInt &Value, QualType SubobjType) {
if (!checkConst(SubobjType))
return false;
if (!NewVal.isInt()) {
// Maybe trying to write a cast pointer value into a complex?
Info.Diag(E);
return false;
}
Value = NewVal.getInt();
return true;
}
bool found(APFloat &Value, QualType SubobjType) {
if (!checkConst(SubobjType))
return false;
Value = NewVal.getFloat();
return true;
}
bool foundString(APValue &Subobj, QualType SubobjType, uint64_t Character) {
llvm_unreachable("shouldn't encounter string elements with ExpandArrays");
}
};
} // end anonymous namespace
const AccessKinds ModifySubobjectHandler::AccessKind;
/// Update the designated sub-object of an rvalue to the given value.
static bool modifySubobject(EvalInfo &Info, const Expr *E,
const CompleteObject &Obj,
const SubobjectDesignator &Sub,
APValue &NewVal) {
ModifySubobjectHandler Handler = { Info, NewVal, E };
return findSubobject(Info, E, Obj, Sub, Handler);
}
/// Find the position where two subobject designators diverge, or equivalently
/// the length of the common initial subsequence.
static unsigned FindDesignatorMismatch(QualType ObjType,
const SubobjectDesignator &A,
const SubobjectDesignator &B,
bool &WasArrayIndex) {
unsigned I = 0, N = std::min(A.Entries.size(), B.Entries.size());
for (/**/; I != N; ++I) {
if (!ObjType.isNull() &&
(ObjType->isArrayType() || ObjType->isAnyComplexType())) {
// Next subobject is an array element.
if (A.Entries[I].ArrayIndex != B.Entries[I].ArrayIndex) {
WasArrayIndex = true;
return I;
}
if (ObjType->isAnyComplexType())
ObjType = ObjType->castAs<ComplexType>()->getElementType();
else
ObjType = ObjType->castAsArrayTypeUnsafe()->getElementType();
} else {
if (A.Entries[I].BaseOrMember != B.Entries[I].BaseOrMember) {
WasArrayIndex = false;
return I;
}
if (const FieldDecl *FD = getAsField(A.Entries[I]))
// Next subobject is a field.
ObjType = FD->getType();
else
// Next subobject is a base class.
ObjType = QualType();
}
}
WasArrayIndex = false;
return I;
}
/// Determine whether the given subobject designators refer to elements of the
/// same array object.
static bool AreElementsOfSameArray(QualType ObjType,
const SubobjectDesignator &A,
const SubobjectDesignator &B) {
if (A.Entries.size() != B.Entries.size())
return false;
bool IsArray = A.MostDerivedArraySize != 0;
if (IsArray && A.MostDerivedPathLength != A.Entries.size())
// A is a subobject of the array element.
return false;
// If A (and B) designates an array element, the last entry will be the array
// index. That doesn't have to match. Otherwise, we're in the 'implicit array
// of length 1' case, and the entire path must match.
bool WasArrayIndex;
unsigned CommonLength = FindDesignatorMismatch(ObjType, A, B, WasArrayIndex);
return CommonLength >= A.Entries.size() - IsArray;
}
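// For example, with 'int a[4];', the designators for a[1] and a[3] have equal
// length and diverge only at the final array-index entry, so they designate
// elements of the same array, while designators of different lengths, or ones
// that also descend into a subobject of an element, are rejected.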
/// Find the complete object to which an LValue refers.
static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
AccessKinds AK, const LValue &LVal,
QualType LValType) {
if (!LVal.Base) {
Info.Diag(E, diag::note_constexpr_access_null) << AK;
return CompleteObject();
}
CallStackFrame *Frame = nullptr;
if (LVal.CallIndex) {
Frame = Info.getCallFrame(LVal.CallIndex);
if (!Frame) {
Info.Diag(E, diag::note_constexpr_lifetime_ended, 1)
<< AK << LVal.Base.is<const ValueDecl*>();
NoteLValueLocation(Info, LVal.Base);
return CompleteObject();
}
}
// C++11 DR1311: An lvalue-to-rvalue conversion on a volatile-qualified type
// is not a constant expression (even if the object is non-volatile). We also
// apply this rule to C++98, in order to conform to the expected 'volatile'
// semantics.
if (LValType.isVolatileQualified()) {
if (Info.getLangOpts().CPlusPlus)
Info.Diag(E, diag::note_constexpr_access_volatile_type)
<< AK << LValType;
else
Info.Diag(E);
return CompleteObject();
}
// Compute value storage location and type of base object.
APValue *BaseVal = nullptr;
QualType BaseType = getType(LVal.Base);
if (const ValueDecl *D = LVal.Base.dyn_cast<const ValueDecl*>()) {
// In C++98, const, non-volatile integers initialized with ICEs are ICEs.
// In C++11, constexpr, non-volatile variables initialized with constant
// expressions are constant expressions too. Inside constexpr functions,
// parameters are constant expressions even if they're non-const.
// In C++1y, objects local to a constant expression (those with a Frame) are
// both readable and writable inside constant expressions.
// In C, such things can also be folded, although they are not ICEs.
const VarDecl *VD = dyn_cast<VarDecl>(D);
if (VD) {
if (const VarDecl *VDef = VD->getDefinition(Info.Ctx))
VD = VDef;
}
if (!VD || VD->isInvalidDecl()) {
Info.Diag(E);
return CompleteObject();
}
// Accesses of volatile-qualified objects are not allowed.
if (BaseType.isVolatileQualified()) {
if (Info.getLangOpts().CPlusPlus) {
Info.Diag(E, diag::note_constexpr_access_volatile_obj, 1)
<< AK << 1 << VD;
Info.Note(VD->getLocation(), diag::note_declared_at);
} else {
Info.Diag(E);
}
return CompleteObject();
}
// Unless we're looking at a local variable or argument in a constexpr call,
// the variable we're reading must be const.
if (!Frame) {
if (Info.getLangOpts().CPlusPlus14 &&
VD == Info.EvaluatingDecl.dyn_cast<const ValueDecl *>()) {
// OK, we can read and modify an object if we're in the process of
// evaluating its initializer, because its lifetime began in this
// evaluation.
} else if (AK != AK_Read) {
// All the remaining cases only permit reading.
Info.Diag(E, diag::note_constexpr_modify_global);
return CompleteObject();
} else if (VD->isConstexpr()) {
// OK, we can read this variable.
} else if (BaseType->isIntegralOrEnumerationType()) {
if (!BaseType.isConstQualified()) {
if (Info.getLangOpts().CPlusPlus) {
Info.Diag(E, diag::note_constexpr_ltor_non_const_int, 1) << VD;
Info.Note(VD->getLocation(), diag::note_declared_at);
} else {
Info.Diag(E);
}
return CompleteObject();
}
} else if (BaseType->isFloatingType() && BaseType.isConstQualified()) {
// We support folding of const floating-point types, in order to make
// static const data members of such types (supported as an extension)
// more useful.
if (Info.getLangOpts().CPlusPlus11) {
Info.CCEDiag(E, diag::note_constexpr_ltor_non_constexpr, 1) << VD;
Info.Note(VD->getLocation(), diag::note_declared_at);
} else {
Info.CCEDiag(E);
}
} else {
// FIXME: Allow folding of values of any literal type in all languages.
if (Info.getLangOpts().CPlusPlus11) {
Info.Diag(E, diag::note_constexpr_ltor_non_constexpr, 1) << VD;
Info.Note(VD->getLocation(), diag::note_declared_at);
} else {
Info.Diag(E);
}
return CompleteObject();
}
}
if (!evaluateVarDeclInit(Info, E, VD, Frame, BaseVal))
return CompleteObject();
} else {
const Expr *Base = LVal.Base.dyn_cast<const Expr*>();
if (!Frame) {
if (const MaterializeTemporaryExpr *MTE =
dyn_cast<MaterializeTemporaryExpr>(Base)) {
assert(MTE->getStorageDuration() == SD_Static &&
"should have a frame for a non-global materialized temporary");
// Per C++1y [expr.const]p2:
// an lvalue-to-rvalue conversion [is not allowed unless it applies to]
// - a [...] glvalue of integral or enumeration type that refers to
// a non-volatile const object [...]
// [...]
// - a [...] glvalue of literal type that refers to a non-volatile
// object whose lifetime began within the evaluation of e.
//
// C++11 misses the 'began within the evaluation of e' check and
// instead allows all temporaries, including things like:
// int &&r = 1;
// int x = ++r;
// constexpr int k = r;
// Therefore we use the C++1y rules in C++11 too.
const ValueDecl *VD = Info.EvaluatingDecl.dyn_cast<const ValueDecl*>();
const ValueDecl *ED = MTE->getExtendingDecl();
if (!(BaseType.isConstQualified() &&
BaseType->isIntegralOrEnumerationType()) &&
!(VD && VD->getCanonicalDecl() == ED->getCanonicalDecl())) {
Info.Diag(E, diag::note_constexpr_access_static_temporary, 1) << AK;
Info.Note(MTE->getExprLoc(), diag::note_constexpr_temporary_here);
return CompleteObject();
}
BaseVal = Info.Ctx.getMaterializedTemporaryValue(MTE, false);
assert(BaseVal && "got reference to unevaluated temporary");
} else {
Info.Diag(E);
return CompleteObject();
}
} else {
BaseVal = Frame->getTemporary(Base);
assert(BaseVal && "missing value for temporary");
}
// Volatile temporary objects cannot be accessed in constant expressions.
if (BaseType.isVolatileQualified()) {
if (Info.getLangOpts().CPlusPlus) {
Info.Diag(E, diag::note_constexpr_access_volatile_obj, 1)
<< AK << 0;
Info.Note(Base->getExprLoc(), diag::note_constexpr_temporary_here);
} else {
Info.Diag(E);
}
return CompleteObject();
}
}
// During the construction of an object, it is not yet 'const'.
// FIXME: We don't set up EvaluatingDecl for local variables or temporaries,
// and this doesn't do quite the right thing for const subobjects of the
// object under construction.
if (LVal.getLValueBase() == Info.EvaluatingDecl) {
BaseType = Info.Ctx.getCanonicalType(BaseType);
BaseType.removeLocalConst();
}
// In C++1y, we can't safely access any mutable state when we might be
// evaluating after an unmodeled side effect or an evaluation failure.
//
// FIXME: Not all local state is mutable. Allow local constant subobjects
// to be read here (but take care with 'mutable' fields).
if (Frame && Info.getLangOpts().CPlusPlus14 &&
(Info.EvalStatus.HasSideEffects || Info.keepEvaluatingAfterFailure()))
return CompleteObject();
return CompleteObject(BaseVal, BaseType);
}
/// \brief Perform an lvalue-to-rvalue conversion on the given glvalue. This
/// can also be used for 'lvalue-to-lvalue' conversions for looking up the
/// glvalue referred to by an entity of reference type.
///
/// \param Info - Information about the ongoing evaluation.
/// \param Conv - The expression for which we are performing the conversion.
/// Used for diagnostics.
/// \param Type - The type of the glvalue (before stripping cv-qualifiers in the
/// case of a non-class type).
/// \param LVal - The glvalue on which we are attempting to perform this action.
/// \param RVal - The produced value will be placed here.
static bool handleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv,
QualType Type,
const LValue &LVal, APValue &RVal) {
if (LVal.Designator.Invalid)
return false;
// Check for special cases where there is no existing APValue to look at.
const Expr *Base = LVal.Base.dyn_cast<const Expr*>();
if (!LVal.Designator.Invalid && Base && !LVal.CallIndex &&
!Type.isVolatileQualified()) {
if (const CompoundLiteralExpr *CLE = dyn_cast<CompoundLiteralExpr>(Base)) {
// In C99, a CompoundLiteralExpr is an lvalue, and we defer evaluating the
// initializer until now for such expressions. Such an expression can't be
// an ICE in C, so this only matters for fold.
assert(!Info.getLangOpts().CPlusPlus && "lvalue compound literal in c++?");
if (Type.isVolatileQualified()) {
Info.Diag(Conv);
return false;
}
APValue Lit;
if (!Evaluate(Lit, Info, CLE->getInitializer()))
return false;
CompleteObject LitObj(&Lit, Base->getType());
return extractSubobject(Info, Conv, LitObj, LVal.Designator, RVal);
} else if (isa<StringLiteral>(Base) || isa<PredefinedExpr>(Base)) {
// We represent a string literal array as an lvalue pointing at the
// corresponding expression, rather than building an array of chars.
// FIXME: Support ObjCEncodeExpr, MakeStringConstant
APValue Str(Base, CharUnits::Zero(), APValue::NoLValuePath(), 0);
CompleteObject StrObj(&Str, Base->getType());
return extractSubobject(Info, Conv, StrObj, LVal.Designator, RVal);
}
}
CompleteObject Obj = findCompleteObject(Info, Conv, AK_Read, LVal, Type);
return Obj && extractSubobject(Info, Conv, Obj, LVal.Designator, RVal);
}
/// Perform an assignment of Val to LVal. Takes ownership of Val.
static bool handleAssignment(EvalInfo &Info, const Expr *E, const LValue &LVal,
QualType LValType, APValue &Val) {
if (LVal.Designator.Invalid)
return false;
if (!Info.getLangOpts().CPlusPlus14) {
Info.Diag(E);
return false;
}
CompleteObject Obj = findCompleteObject(Info, E, AK_Assign, LVal, LValType);
return Obj && modifySubobject(Info, E, Obj, LVal.Designator, Val);
}
static bool isOverflowingIntegerType(ASTContext &Ctx, QualType T) {
return T->isSignedIntegerType() &&
Ctx.getIntWidth(T) >= Ctx.getIntWidth(Ctx.IntTy);
}
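// For example, on common targets where 'short' is narrower than 'int', this
// holds for 'int' and 'long' but not for 'short' or 'signed char': arithmetic
// on the latter happens after promotion to 'int', so it cannot overflow in
// the original type.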
namespace {
struct CompoundAssignSubobjectHandler {
EvalInfo &Info;
const Expr *E;
QualType PromotedLHSType;
BinaryOperatorKind Opcode;
const APValue &RHS;
static const AccessKinds AccessKind = AK_Assign;
typedef bool result_type;
bool checkConst(QualType QT) {
// Assigning to a const object has undefined behavior.
if (QT.isConstQualified()) {
Info.Diag(E, diag::note_constexpr_modify_const_type) << QT;
return false;
}
return true;
}
bool failed() { return false; }
bool found(APValue &Subobj, QualType SubobjType) {
switch (Subobj.getKind()) {
case APValue::Int:
return found(Subobj.getInt(), SubobjType);
case APValue::Float:
return found(Subobj.getFloat(), SubobjType);
case APValue::ComplexInt:
case APValue::ComplexFloat:
// FIXME: Implement complex compound assignment.
Info.Diag(E);
return false;
case APValue::LValue:
return foundPointer(Subobj, SubobjType);
default:
// FIXME: can this happen?
Info.Diag(E);
return false;
}
}
bool found(APSInt &Value, QualType SubobjType) {
if (!checkConst(SubobjType))
return false;
if (!SubobjType->isIntegerType() || !RHS.isInt()) {
// We don't support compound assignment on integer-cast-to-pointer
// values.
Info.Diag(E);
return false;
}
APSInt LHS = HandleIntToIntCast(Info, E, PromotedLHSType,
SubobjType, Value);
if (!handleIntIntBinOp(Info, E, LHS, Opcode, RHS.getInt(), LHS))
return false;
Value = HandleIntToIntCast(Info, E, SubobjType, PromotedLHSType, LHS);
return true;
}
bool found(APFloat &Value, QualType SubobjType) {
return checkConst(SubobjType) &&
HandleFloatToFloatCast(Info, E, SubobjType, PromotedLHSType,
Value) &&
handleFloatFloatBinOp(Info, E, Value, Opcode, RHS.getFloat()) &&
HandleFloatToFloatCast(Info, E, PromotedLHSType, SubobjType, Value);
}
bool foundPointer(APValue &Subobj, QualType SubobjType) {
if (!checkConst(SubobjType))
return false;
QualType PointeeType;
if (const PointerType *PT = SubobjType->getAs<PointerType>())
PointeeType = PT->getPointeeType();
if (PointeeType.isNull() || !RHS.isInt() ||
(Opcode != BO_Add && Opcode != BO_Sub)) {
Info.Diag(E);
return false;
}
int64_t Offset = getExtValue(RHS.getInt());
if (Opcode == BO_Sub)
Offset = -Offset;
LValue LVal;
LVal.setFrom(Info.Ctx, Subobj);
if (!HandleLValueArrayAdjustment(Info, E, LVal, PointeeType, Offset))
return false;
LVal.moveInto(Subobj);
return true;
}
bool foundString(APValue &Subobj, QualType SubobjType, uint64_t Character) {
llvm_unreachable("shouldn't encounter string elements here");
}
};
} // end anonymous namespace
const AccessKinds CompoundAssignSubobjectHandler::AccessKind;
/// Perform a compound assignment of LVal <op>= RVal.
static bool handleCompoundAssignment(
EvalInfo &Info, const Expr *E,
const LValue &LVal, QualType LValType, QualType PromotedLValType,
BinaryOperatorKind Opcode, const APValue &RVal) {
if (LVal.Designator.Invalid)
return false;
if (!Info.getLangOpts().CPlusPlus14) {
Info.Diag(E);
return false;
}
CompleteObject Obj = findCompleteObject(Info, E, AK_Assign, LVal, LValType);
CompoundAssignSubobjectHandler Handler = { Info, E, PromotedLValType, Opcode,
RVal };
return Obj && findSubobject(Info, E, Obj, LVal.Designator, Handler);
}
namespace {
struct IncDecSubobjectHandler {
EvalInfo &Info;
const Expr *E;
AccessKinds AccessKind;
APValue *Old;
typedef bool result_type;
bool checkConst(QualType QT) {
// Assigning to a const object has undefined behavior.
if (QT.isConstQualified()) {
Info.Diag(E, diag::note_constexpr_modify_const_type) << QT;
return false;
}
return true;
}
bool failed() { return false; }
bool found(APValue &Subobj, QualType SubobjType) {
// Stash the old value. Also clear Old, so we don't clobber it later
// if we're post-incrementing a complex.
if (Old) {
*Old = Subobj;
Old = nullptr;
}
switch (Subobj.getKind()) {
case APValue::Int:
return found(Subobj.getInt(), SubobjType);
case APValue::Float:
return found(Subobj.getFloat(), SubobjType);
case APValue::ComplexInt:
return found(Subobj.getComplexIntReal(),
SubobjType->castAs<ComplexType>()->getElementType()
.withCVRQualifiers(SubobjType.getCVRQualifiers()));
case APValue::ComplexFloat:
return found(Subobj.getComplexFloatReal(),
SubobjType->castAs<ComplexType>()->getElementType()
.withCVRQualifiers(SubobjType.getCVRQualifiers()));
case APValue::LValue:
return foundPointer(Subobj, SubobjType);
default:
// FIXME: can this happen?
Info.Diag(E);
return false;
}
}
bool found(APSInt &Value, QualType SubobjType) {
if (!checkConst(SubobjType))
return false;
if (!SubobjType->isIntegerType()) {
// We don't support increment / decrement on integer-cast-to-pointer
// values.
Info.Diag(E);
return false;
}
if (Old) *Old = APValue(Value);
// bool arithmetic promotes to int, and the conversion back to bool
// doesn't reduce mod 2^n, so special-case it.
if (SubobjType->isBooleanType()) {
if (AccessKind == AK_Increment)
Value = 1;
else
Value = !Value;
return true;
}
bool WasNegative = Value.isNegative();
if (AccessKind == AK_Increment) {
++Value;
if (!WasNegative && Value.isNegative() &&
isOverflowingIntegerType(Info.Ctx, SubobjType)) {
APSInt ActualValue(Value, /*IsUnsigned*/true);
HandleOverflow(Info, E, ActualValue, SubobjType);
}
} else {
--Value;
if (WasNegative && !Value.isNegative() &&
isOverflowingIntegerType(Info.Ctx, SubobjType)) {
unsigned BitWidth = Value.getBitWidth();
APSInt ActualValue(Value.sext(BitWidth + 1), /*IsUnsigned*/false);
ActualValue.setBit(BitWidth);
HandleOverflow(Info, E, ActualValue, SubobjType);
}
}
return true;
}
bool found(APFloat &Value, QualType SubobjType) {
if (!checkConst(SubobjType))
return false;
if (Old) *Old = APValue(Value);
APFloat One(Value.getSemantics(), 1);
if (AccessKind == AK_Increment)
Value.add(One, APFloat::rmNearestTiesToEven);
else
Value.subtract(One, APFloat::rmNearestTiesToEven);
return true;
}
bool foundPointer(APValue &Subobj, QualType SubobjType) {
if (!checkConst(SubobjType))
return false;
QualType PointeeType;
if (const PointerType *PT = SubobjType->getAs<PointerType>())
PointeeType = PT->getPointeeType();
else {
Info.Diag(E);
return false;
}
LValue LVal;
LVal.setFrom(Info.Ctx, Subobj);
if (!HandleLValueArrayAdjustment(Info, E, LVal, PointeeType,
AccessKind == AK_Increment ? 1 : -1))
return false;
LVal.moveInto(Subobj);
return true;
}
bool foundString(APValue &Subobj, QualType SubobjType, uint64_t Character) {
llvm_unreachable("shouldn't encounter string elements here");
}
};
} // end anonymous namespace
/// Perform an increment or decrement on LVal.
static bool handleIncDec(EvalInfo &Info, const Expr *E, const LValue &LVal,
QualType LValType, bool IsIncrement, APValue *Old) {
if (LVal.Designator.Invalid)
return false;
if (!Info.getLangOpts().CPlusPlus14) {
Info.Diag(E);
return false;
}
AccessKinds AK = IsIncrement ? AK_Increment : AK_Decrement;
CompleteObject Obj = findCompleteObject(Info, E, AK, LVal, LValType);
IncDecSubobjectHandler Handler = { Info, E, AK, Old };
return Obj && findSubobject(Info, E, Obj, LVal.Designator, Handler);
}
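// Two illustrative C++14 foldings, assuming a 32-bit 'int':
//
//   constexpr int f() { int x = 0x7fffffff; return ++x; }  // overflow is
//                                                          // reported
//   constexpr bool g() { bool b = false; return ++b; }     // yields true via
//                                                          // the bool case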
/// Build an lvalue for the object argument of a member function call.
static bool EvaluateObjectArgument(EvalInfo &Info, const Expr *Object,
LValue &This) {
if (Object->getType()->isPointerType())
return EvaluatePointer(Object, This, Info);
if (Object->isGLValue())
return EvaluateLValue(Object, This, Info);
if (Object->getType()->isLiteralType(Info.Ctx))
return EvaluateTemporary(Object, This, Info);
Info.Diag(Object, diag::note_constexpr_nonliteral) << Object->getType();
return false;
}
/// HandleMemberPointerAccess - Evaluate a member access operation and build an
/// lvalue referring to the result.
///
/// \param Info - Information about the ongoing evaluation.
/// \param LV - An lvalue referring to the base of the member pointer.
/// \param RHS - The member pointer expression.
/// \param IncludeMember - Specifies whether the member itself is included in
/// the resulting LValue subobject designator. This is not possible when
/// creating a bound member function.
/// \return The field or method declaration to which the member pointer refers,
/// or 0 if evaluation fails.
static const ValueDecl *HandleMemberPointerAccess(EvalInfo &Info,
QualType LVType,
LValue &LV,
const Expr *RHS,
bool IncludeMember = true) {
MemberPtr MemPtr;
if (!EvaluateMemberPointer(RHS, MemPtr, Info))
return nullptr;
// C++11 [expr.mptr.oper]p6: If the second operand is the null pointer to
// member value, the behavior is undefined.
if (!MemPtr.getDecl()) {
// FIXME: Specific diagnostic.
Info.Diag(RHS);
return nullptr;
}
if (MemPtr.isDerivedMember()) {
// This is a member of some derived class. Truncate LV appropriately.
// The end of the derived-to-base path for the base object must match the
// derived-to-base path for the member pointer.
if (LV.Designator.MostDerivedPathLength + MemPtr.Path.size() >
LV.Designator.Entries.size()) {
Info.Diag(RHS);
return nullptr;
}
unsigned PathLengthToMember =
LV.Designator.Entries.size() - MemPtr.Path.size();
for (unsigned I = 0, N = MemPtr.Path.size(); I != N; ++I) {
const CXXRecordDecl *LVDecl = getAsBaseClass(
LV.Designator.Entries[PathLengthToMember + I]);
const CXXRecordDecl *MPDecl = MemPtr.Path[I];
if (LVDecl->getCanonicalDecl() != MPDecl->getCanonicalDecl()) {
Info.Diag(RHS);
return nullptr;
}
}
// Truncate the lvalue to the appropriate derived class.
if (!CastToDerivedClass(Info, RHS, LV, MemPtr.getContainingRecord(),
PathLengthToMember))
return nullptr;
} else if (!MemPtr.Path.empty()) {
// Extend the LValue path with the member pointer's path.
LV.Designator.Entries.reserve(LV.Designator.Entries.size() +
MemPtr.Path.size() + IncludeMember);
// Walk down to the appropriate base class.
if (const PointerType *PT = LVType->getAs<PointerType>())
LVType = PT->getPointeeType();
const CXXRecordDecl *RD = LVType->getAsCXXRecordDecl();
assert(RD && "member pointer access on non-class-type expression");
// The first class in the path is that of the lvalue.
for (unsigned I = 1, N = MemPtr.Path.size(); I != N; ++I) {
const CXXRecordDecl *Base = MemPtr.Path[N - I - 1];
if (!HandleLValueDirectBase(Info, RHS, LV, RD, Base))
return nullptr;
RD = Base;
}
// Finally cast to the class containing the member.
if (!HandleLValueDirectBase(Info, RHS, LV, RD,
MemPtr.getContainingRecord()))
return nullptr;
}
// Add the member. Note that we cannot build bound member functions here.
if (IncludeMember) {
if (const FieldDecl *FD = dyn_cast<FieldDecl>(MemPtr.getDecl())) {
if (!HandleLValueMember(Info, RHS, LV, FD))
return nullptr;
} else if (const IndirectFieldDecl *IFD =
dyn_cast<IndirectFieldDecl>(MemPtr.getDecl())) {
if (!HandleLValueIndirectMember(Info, RHS, LV, IFD))
return nullptr;
} else {
llvm_unreachable("can't construct reference to bound member function");
}
}
return MemPtr.getDecl();
}
static const ValueDecl *HandleMemberPointerAccess(EvalInfo &Info,
const BinaryOperator *BO,
LValue &LV,
bool IncludeMember = true) {
assert(BO->getOpcode() == BO_PtrMemD || BO->getOpcode() == BO_PtrMemI);
if (!EvaluateObjectArgument(Info, BO->getLHS(), LV)) {
if (Info.keepEvaluatingAfterFailure()) {
MemberPtr MemPtr;
EvaluateMemberPointer(BO->getRHS(), MemPtr, Info);
}
return nullptr;
}
return HandleMemberPointerAccess(Info, BO->getLHS()->getType(), LV,
BO->getRHS(), IncludeMember);
}
/// HandleBaseToDerivedCast - Apply the given base-to-derived cast operation on
/// the provided lvalue, which currently refers to the base object.
static bool HandleBaseToDerivedCast(EvalInfo &Info, const CastExpr *E,
LValue &Result) {
SubobjectDesignator &D = Result.Designator;
if (D.Invalid || !Result.checkNullPointer(Info, E, CSK_Derived))
return false;
QualType TargetQT = E->getType();
if (const PointerType *PT = TargetQT->getAs<PointerType>())
TargetQT = PT->getPointeeType();
// Check this cast lands within the final derived-to-base subobject path.
if (D.MostDerivedPathLength + E->path_size() > D.Entries.size()) {
Info.CCEDiag(E, diag::note_constexpr_invalid_downcast)
<< D.MostDerivedType << TargetQT;
return false;
}
// Check the type of the final cast. We don't need to check the path,
// since a cast can only be formed if the path is unique.
unsigned NewEntriesSize = D.Entries.size() - E->path_size();
const CXXRecordDecl *TargetType = TargetQT->getAsCXXRecordDecl();
const CXXRecordDecl *FinalType;
if (NewEntriesSize == D.MostDerivedPathLength)
FinalType = D.MostDerivedType->getAsCXXRecordDecl();
else
FinalType = getAsBaseClass(D.Entries[NewEntriesSize - 1]);
if (FinalType->getCanonicalDecl() != TargetType->getCanonicalDecl()) {
Info.CCEDiag(E, diag::note_constexpr_invalid_downcast)
<< D.MostDerivedType << TargetQT;
return false;
}
// Truncate the lvalue to the appropriate derived class.
return CastToDerivedClass(Info, E, Result, TargetType, NewEntriesSize);
}
namespace {
enum EvalStmtResult {
/// Evaluation failed.
ESR_Failed,
/// Hit a 'return' statement.
ESR_Returned,
/// Evaluation succeeded.
ESR_Succeeded,
/// Hit a 'continue' statement.
ESR_Continue,
/// Hit a 'break' statement.
ESR_Break,
/// Still scanning for 'case' or 'default' statement.
ESR_CaseNotFound
};
}
static bool EvaluateDecl(EvalInfo &Info, const Decl *D) {
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
// We don't need to evaluate the initializer for a static local.
if (!VD->hasLocalStorage())
return true;
LValue Result;
Result.set(VD, Info.CurrentCall->Index);
APValue &Val = Info.CurrentCall->createTemporary(VD, true);
const Expr *InitE = VD->getInit();
if (!InitE) {
Info.Diag(D->getLocStart(), diag::note_constexpr_uninitialized)
<< false << VD->getType();
Val = APValue();
return false;
}
if (InitE->isValueDependent())
return false;
if (!EvaluateInPlace(Val, Info, Result, InitE)) {
// Wipe out any partially-computed value, to allow tracking that this
// evaluation failed.
Val = APValue();
return false;
}
}
return true;
}
/// Evaluate a condition (either a variable declaration or an expression).
static bool EvaluateCond(EvalInfo &Info, const VarDecl *CondDecl,
const Expr *Cond, bool &Result) {
FullExpressionRAII Scope(Info);
if (CondDecl && !EvaluateDecl(Info, CondDecl))
return false;
return EvaluateAsBooleanCondition(Cond, Result, Info);
}
static EvalStmtResult EvaluateStmt(APValue &Result, EvalInfo &Info,
const Stmt *S,
const SwitchCase *SC = nullptr);
/// Evaluate the body of a loop, and translate the result as appropriate.
static EvalStmtResult EvaluateLoopBody(APValue &Result, EvalInfo &Info,
const Stmt *Body,
const SwitchCase *Case = nullptr) {
BlockScopeRAII Scope(Info);
switch (EvalStmtResult ESR = EvaluateStmt(Result, Info, Body, Case)) {
case ESR_Break:
return ESR_Succeeded;
case ESR_Succeeded:
case ESR_Continue:
return ESR_Continue;
case ESR_Failed:
case ESR_Returned:
case ESR_CaseNotFound:
return ESR;
}
llvm_unreachable("Invalid EvalStmtResult!");
}
/// Evaluate a switch statement.
static EvalStmtResult EvaluateSwitch(APValue &Result, EvalInfo &Info,
const SwitchStmt *SS) {
BlockScopeRAII Scope(Info);
// Evaluate the switch condition.
APSInt Value;
{
FullExpressionRAII Scope(Info);
if (SS->getConditionVariable() &&
!EvaluateDecl(Info, SS->getConditionVariable()))
return ESR_Failed;
if (!EvaluateInteger(SS->getCond(), Value, Info))
return ESR_Failed;
}
// Find the switch case corresponding to the value of the condition.
// FIXME: Cache this lookup.
const SwitchCase *Found = nullptr;
for (const SwitchCase *SC = SS->getSwitchCaseList(); SC;
SC = SC->getNextSwitchCase()) {
if (isa<DefaultStmt>(SC)) {
Found = SC;
continue;
}
const CaseStmt *CS = cast<CaseStmt>(SC);
APSInt LHS = CS->getLHS()->EvaluateKnownConstInt(Info.Ctx);
APSInt RHS = CS->getRHS() ? CS->getRHS()->EvaluateKnownConstInt(Info.Ctx)
: LHS;
if (LHS <= Value && Value <= RHS) {
Found = SC;
break;
}
}
if (!Found)
return ESR_Succeeded;
// Search the switch body for the switch case and evaluate it from there.
switch (EvalStmtResult ESR = EvaluateStmt(Result, Info, SS->getBody(), Found)) {
case ESR_Break:
return ESR_Succeeded;
case ESR_Succeeded:
case ESR_Continue:
case ESR_Failed:
case ESR_Returned:
return ESR;
case ESR_CaseNotFound:
// This can only happen if the switch case is nested within a statement
// expression. We have no intention of supporting that.
Info.Diag(Found->getLocStart(), diag::note_constexpr_stmt_expr_unsupported);
return ESR_Failed;
}
llvm_unreachable("Invalid EvalStmtResult!");
}
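// For example, with the GNU case-range extension:
//   switch (v) { case 1 ... 5: return 0; default: return 1; }
// a condition value of 3 satisfies LHS <= Value && Value <= RHS for the range
// case, while 7 selects the DefaultStmt remembered during the scan.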
// Evaluate a statement.
static EvalStmtResult EvaluateStmt(APValue &Result, EvalInfo &Info,
const Stmt *S, const SwitchCase *Case) {
if (!Info.nextStep(S))
return ESR_Failed;
// If we're hunting down a 'case' or 'default' label, recurse through
// substatements until we hit the label.
if (Case) {
    // FIXME: We don't start the lifetime of objects whose initialization we
    // jump over. However, such objects must be of class type with a trivial
    // default constructor that initializes all subobjects, which forces the
    // class to be empty, so this almost never matters.
switch (S->getStmtClass()) {
case Stmt::CompoundStmtClass:
// FIXME: Precompute which substatement of a compound statement we
// would jump to, and go straight there rather than performing a
// linear scan each time.
case Stmt::LabelStmtClass:
case Stmt::AttributedStmtClass:
case Stmt::DoStmtClass:
break;
case Stmt::CaseStmtClass:
case Stmt::DefaultStmtClass:
if (Case == S)
Case = nullptr;
break;
case Stmt::IfStmtClass: {
// FIXME: Precompute which side of an 'if' we would jump to, and go
// straight there rather than scanning both sides.
const IfStmt *IS = cast<IfStmt>(S);
// Wrap the evaluation in a block scope, in case it's a DeclStmt
// preceded by our switch label.
BlockScopeRAII Scope(Info);
EvalStmtResult ESR = EvaluateStmt(Result, Info, IS->getThen(), Case);
if (ESR != ESR_CaseNotFound || !IS->getElse())
return ESR;
return EvaluateStmt(Result, Info, IS->getElse(), Case);
}
case Stmt::WhileStmtClass: {
EvalStmtResult ESR =
EvaluateLoopBody(Result, Info, cast<WhileStmt>(S)->getBody(), Case);
if (ESR != ESR_Continue)
return ESR;
break;
}
case Stmt::ForStmtClass: {
const ForStmt *FS = cast<ForStmt>(S);
EvalStmtResult ESR =
EvaluateLoopBody(Result, Info, FS->getBody(), Case);
if (ESR != ESR_Continue)
return ESR;
if (FS->getInc()) {
FullExpressionRAII IncScope(Info);
if (!EvaluateIgnoredValue(Info, FS->getInc()))
return ESR_Failed;
}
break;
}
case Stmt::DeclStmtClass:
// FIXME: If the variable has initialization that can't be jumped over,
// bail out of any immediately-surrounding compound-statement too.
default:
return ESR_CaseNotFound;
}
}
switch (S->getStmtClass()) {
default:
if (const Expr *E = dyn_cast<Expr>(S)) {
// Don't bother evaluating beyond an expression-statement which couldn't
// be evaluated.
FullExpressionRAII Scope(Info);
if (!EvaluateIgnoredValue(Info, E))
return ESR_Failed;
return ESR_Succeeded;
}
Info.Diag(S->getLocStart());
return ESR_Failed;
case Stmt::NullStmtClass:
return ESR_Succeeded;
case Stmt::DeclStmtClass: {
const DeclStmt *DS = cast<DeclStmt>(S);
for (const auto *DclIt : DS->decls()) {
// Each declaration initialization is its own full-expression.
// FIXME: This isn't quite right; if we're performing aggregate
// initialization, each braced subexpression is its own full-expression.
FullExpressionRAII Scope(Info);
if (!EvaluateDecl(Info, DclIt) && !Info.keepEvaluatingAfterFailure())
return ESR_Failed;
}
return ESR_Succeeded;
}
case Stmt::ReturnStmtClass: {
const Expr *RetExpr = cast<ReturnStmt>(S)->getRetValue();
FullExpressionRAII Scope(Info);
if (RetExpr && !Evaluate(Result, Info, RetExpr))
return ESR_Failed;
return ESR_Returned;
}
case Stmt::CompoundStmtClass: {
BlockScopeRAII Scope(Info);
const CompoundStmt *CS = cast<CompoundStmt>(S);
for (const auto *BI : CS->body()) {
EvalStmtResult ESR = EvaluateStmt(Result, Info, BI, Case);
if (ESR == ESR_Succeeded)
Case = nullptr;
else if (ESR != ESR_CaseNotFound)
return ESR;
}
return Case ? ESR_CaseNotFound : ESR_Succeeded;
}
case Stmt::IfStmtClass: {
const IfStmt *IS = cast<IfStmt>(S);
// Evaluate the condition, as either a var decl or as an expression.
BlockScopeRAII Scope(Info);
bool Cond;
if (!EvaluateCond(Info, IS->getConditionVariable(), IS->getCond(), Cond))
return ESR_Failed;
if (const Stmt *SubStmt = Cond ? IS->getThen() : IS->getElse()) {
EvalStmtResult ESR = EvaluateStmt(Result, Info, SubStmt);
if (ESR != ESR_Succeeded)
return ESR;
}
return ESR_Succeeded;
}
case Stmt::WhileStmtClass: {
const WhileStmt *WS = cast<WhileStmt>(S);
while (true) {
BlockScopeRAII Scope(Info);
bool Continue;
if (!EvaluateCond(Info, WS->getConditionVariable(), WS->getCond(),
Continue))
return ESR_Failed;
if (!Continue)
break;
EvalStmtResult ESR = EvaluateLoopBody(Result, Info, WS->getBody());
if (ESR != ESR_Continue)
return ESR;
}
return ESR_Succeeded;
}
case Stmt::DoStmtClass: {
const DoStmt *DS = cast<DoStmt>(S);
bool Continue;
do {
EvalStmtResult ESR = EvaluateLoopBody(Result, Info, DS->getBody(), Case);
if (ESR != ESR_Continue)
return ESR;
Case = nullptr;
FullExpressionRAII CondScope(Info);
if (!EvaluateAsBooleanCondition(DS->getCond(), Continue, Info))
return ESR_Failed;
} while (Continue);
return ESR_Succeeded;
}
case Stmt::ForStmtClass: {
const ForStmt *FS = cast<ForStmt>(S);
BlockScopeRAII Scope(Info);
if (FS->getInit()) {
EvalStmtResult ESR = EvaluateStmt(Result, Info, FS->getInit());
if (ESR != ESR_Succeeded)
return ESR;
}
while (true) {
BlockScopeRAII Scope(Info);
bool Continue = true;
if (FS->getCond() && !EvaluateCond(Info, FS->getConditionVariable(),
FS->getCond(), Continue))
return ESR_Failed;
if (!Continue)
break;
EvalStmtResult ESR = EvaluateLoopBody(Result, Info, FS->getBody());
if (ESR != ESR_Continue)
return ESR;
if (FS->getInc()) {
FullExpressionRAII IncScope(Info);
if (!EvaluateIgnoredValue(Info, FS->getInc()))
return ESR_Failed;
}
}
return ESR_Succeeded;
}
case Stmt::CXXForRangeStmtClass: {
const CXXForRangeStmt *FS = cast<CXXForRangeStmt>(S);
BlockScopeRAII Scope(Info);
// Initialize the __range variable.
EvalStmtResult ESR = EvaluateStmt(Result, Info, FS->getRangeStmt());
if (ESR != ESR_Succeeded)
return ESR;
// Create the __begin and __end iterators.
ESR = EvaluateStmt(Result, Info, FS->getBeginEndStmt());
if (ESR != ESR_Succeeded)
return ESR;
while (true) {
// Condition: __begin != __end.
{
bool Continue = true;
FullExpressionRAII CondExpr(Info);
if (!EvaluateAsBooleanCondition(FS->getCond(), Continue, Info))
return ESR_Failed;
if (!Continue)
break;
}
// User's variable declaration, initialized by *__begin.
BlockScopeRAII InnerScope(Info);
ESR = EvaluateStmt(Result, Info, FS->getLoopVarStmt());
if (ESR != ESR_Succeeded)
return ESR;
// Loop body.
ESR = EvaluateLoopBody(Result, Info, FS->getBody());
if (ESR != ESR_Continue)
return ESR;
// Increment: ++__begin
if (!EvaluateIgnoredValue(Info, FS->getInc()))
return ESR_Failed;
}
return ESR_Succeeded;
}
case Stmt::SwitchStmtClass:
return EvaluateSwitch(Result, Info, cast<SwitchStmt>(S));
case Stmt::ContinueStmtClass:
return ESR_Continue;
case Stmt::BreakStmtClass:
return ESR_Break;
case Stmt::LabelStmtClass:
return EvaluateStmt(Result, Info, cast<LabelStmt>(S)->getSubStmt(), Case);
case Stmt::AttributedStmtClass:
// As a general principle, C++11 attributes can be ignored without
// any semantic impact.
return EvaluateStmt(Result, Info, cast<AttributedStmt>(S)->getSubStmt(),
Case);
case Stmt::CaseStmtClass:
case Stmt::DefaultStmtClass:
return EvaluateStmt(Result, Info, cast<SwitchCase>(S)->getSubStmt(), Case);
}
}
/// CheckTrivialDefaultConstructor - Check whether a constructor is a trivial
/// default constructor. If so, we'll fold it whether or not it's marked as
/// constexpr. If it is marked as constexpr, we will never implicitly define it,
/// so we need special handling.
static bool CheckTrivialDefaultConstructor(EvalInfo &Info, SourceLocation Loc,
const CXXConstructorDecl *CD,
bool IsValueInitialization) {
if (!CD->isTrivial() || !CD->isDefaultConstructor())
return false;
// Value-initialization does not call a trivial default constructor, so such a
// call is a core constant expression whether or not the constructor is
// constexpr.
if (!CD->isConstexpr() && !IsValueInitialization) {
if (Info.getLangOpts().CPlusPlus11) {
// FIXME: If DiagDecl is an implicitly-declared special member function,
// we should be much more explicit about why it's not constexpr.
Info.CCEDiag(Loc, diag::note_constexpr_invalid_function, 1)
<< /*IsConstexpr*/0 << /*IsConstructor*/1 << CD;
Info.Note(CD->getLocation(), diag::note_declared_at);
} else {
Info.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
}
}
return true;
}
/// CheckConstexprFunction - Check that a function can be called in a constant
/// expression.
static bool CheckConstexprFunction(EvalInfo &Info, SourceLocation CallLoc,
const FunctionDecl *Declaration,
const FunctionDecl *Definition) {
// Potential constant expressions can contain calls to declared, but not yet
// defined, constexpr functions.
if (Info.checkingPotentialConstantExpression() && !Definition &&
Declaration->isConstexpr())
return false;
// Bail out with no diagnostic if the function declaration itself is invalid.
// We will have produced a relevant diagnostic while parsing it.
if (Declaration->isInvalidDecl())
return false;
// Can we evaluate this function call?
if (Definition && Definition->isConstexpr() && !Definition->isInvalidDecl())
return true;
if (Info.getLangOpts().CPlusPlus11) {
const FunctionDecl *DiagDecl = Definition ? Definition : Declaration;
// FIXME: If DiagDecl is an implicitly-declared special member function, we
// should be much more explicit about why it's not constexpr.
Info.Diag(CallLoc, diag::note_constexpr_invalid_function, 1)
<< DiagDecl->isConstexpr() << isa<CXXConstructorDecl>(DiagDecl)
<< DiagDecl;
Info.Note(DiagDecl->getLocation(), diag::note_declared_at);
} else {
Info.Diag(CallLoc, diag::note_invalid_subexpr_in_const_expr);
}
return false;
}
/// Determine if a class has any fields that might need to be copied by a
/// trivial copy or move operation.
static bool hasFields(const CXXRecordDecl *RD) {
if (!RD || RD->isEmpty())
return false;
for (auto *FD : RD->fields()) {
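    // Unnamed bit-fields are not members and are never copied.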
if (FD->isUnnamedBitfield())
continue;
return true;
}
for (auto &Base : RD->bases())
if (hasFields(Base.getType()->getAsCXXRecordDecl()))
return true;
return false;
}
namespace {
typedef SmallVector<APValue, 8> ArgVector;
}
/// EvaluateArgs - Evaluate the arguments to a function call.
static bool EvaluateArgs(ArrayRef<const Expr*> Args, ArgVector &ArgValues,
EvalInfo &Info) {
bool Success = true;
for (ArrayRef<const Expr*>::iterator I = Args.begin(), E = Args.end();
I != E; ++I) {
if (!Evaluate(ArgValues[I - Args.begin()], Info, *I)) {
// If we're checking for a potential constant expression, evaluate all
// initializers even if some of them fail.
if (!Info.keepEvaluatingAfterFailure())
return false;
Success = false;
}
}
return Success;
}
// HLSL Change Starts
/// Evaluate an HLSL intrinsic call.
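/// Only the intrinsics needed for constant folding are handled here
/// (currently asuint, min, and max); e.g. asuint(1.0f) folds to 0x3F800000.
/// Anything else is rejected as a non-constant subexpression.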
static bool HandleIntrinsicCall(SourceLocation CallLoc, unsigned opcode,
const LValue *This, ArrayRef<const Expr *> Args,
EvalInfo &Info, APValue &Result) {
ArgVector ArgValues(Args.size());
if (!EvaluateArgs(Args, ArgValues, Info))
return false;
if (!Info.CheckCallLimit(CallLoc))
return false;
switch ((hlsl::IntrinsicOp)opcode) {
case hlsl::IntrinsicOp::IOP_asuint:
assert(Args.size() == 1 && "else call should be invalid");
    if (ArgValues[0].isInt()) {
      Result = ArgValues[0];
    } else if (ArgValues[0].isFloat()) {
      const bool isUnsignedTrue = true;
      Result = APValue(
          APSInt(ArgValues[0].getFloat().bitcastToAPInt(), isUnsignedTrue));
    } else {
      // TODO: consider a better error message here
      Info.Diag(CallLoc, diag::note_invalid_subexpr_in_const_expr);
      return false;
    }
return true;
case hlsl::IntrinsicOp::IOP_max:
assert(Args.size() == 2 && "else call should be invalid");
assert(ArgValues[0].getKind() == ArgValues[1].getKind() && "else call is invalid");
    if (ArgValues[0].isInt()) {
      Result = ArgValues[0].getInt() > ArgValues[1].getInt() ? ArgValues[0]
                                                             : ArgValues[1];
    } else if (ArgValues[0].isFloat()) {
      // TODO: handle NaNs properly
      APFloat::cmpResult r =
          ArgValues[0].getFloat().compare(ArgValues[1].getFloat());
      Result = (r == APFloat::cmpGreaterThan) ? ArgValues[0] : ArgValues[1];
    } else {
      // TODO: consider a better error message here
      Info.Diag(CallLoc, diag::note_invalid_subexpr_in_const_expr);
      return false;
    }
return true;
case hlsl::IntrinsicOp::IOP_min:
assert(Args.size() == 2 && "else call should be invalid");
assert(ArgValues[0].getKind() == ArgValues[1].getKind() && "else call is invalid");
    if (ArgValues[0].isInt()) {
      Result = ArgValues[0].getInt() < ArgValues[1].getInt() ? ArgValues[0]
                                                             : ArgValues[1];
    } else if (ArgValues[0].isFloat()) {
      // TODO: handle NaNs properly
      APFloat::cmpResult r =
          ArgValues[0].getFloat().compare(ArgValues[1].getFloat());
      Result = (r == APFloat::cmpLessThan) ? ArgValues[0] : ArgValues[1];
    } else {
      // TODO: consider a better error message here
      Info.Diag(CallLoc, diag::note_invalid_subexpr_in_const_expr);
      return false;
    }
return true;
default:
Info.Diag(CallLoc, diag::note_invalid_subexpr_in_const_expr);
return false;
}
}
// HLSL Change Ends
/// Evaluate a function call.
static bool HandleFunctionCall(SourceLocation CallLoc,
const FunctionDecl *Callee, const LValue *This,
ArrayRef<const Expr*> Args, const Stmt *Body,
EvalInfo &Info, APValue &Result) {
ArgVector ArgValues(Args.size());
if (!EvaluateArgs(Args, ArgValues, Info))
return false;
if (!Info.CheckCallLimit(CallLoc))
return false;
CallStackFrame Frame(Info, CallLoc, Callee, This, ArgValues.data());
// For a trivial copy or move assignment, perform an APValue copy. This is
// essential for unions, where the operations performed by the assignment
// operator cannot be represented as statements.
//
// Skip this for non-union classes with no fields; in that case, the defaulted
// copy/move does not actually read the object.
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Callee);
if (MD && MD->isDefaulted() &&
(MD->getParent()->isUnion() ||
(MD->isTrivial() && hasFields(MD->getParent())))) {
assert(This &&
(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()));
LValue RHS;
RHS.setFrom(Info.Ctx, ArgValues[0]);
APValue RHSValue;
if (!handleLValueToRValueConversion(Info, Args[0], Args[0]->getType(),
RHS, RHSValue))
return false;
if (!handleAssignment(Info, Args[0], *This, MD->getThisType(Info.Ctx),
RHSValue))
return false;
This->moveInto(Result);
return true;
}
EvalStmtResult ESR = EvaluateStmt(Result, Info, Body);
if (ESR == ESR_Succeeded) {
if (Callee->getReturnType()->isVoidType())
return true;
Info.Diag(Callee->getLocEnd(), diag::note_constexpr_no_return);
}
return ESR == ESR_Returned;
}
/// Evaluate a constructor call.
static bool HandleConstructorCall(SourceLocation CallLoc, const LValue &This,
ArrayRef<const Expr*> Args,
const CXXConstructorDecl *Definition,
EvalInfo &Info, APValue &Result) {
ArgVector ArgValues(Args.size());
if (!EvaluateArgs(Args, ArgValues, Info))
return false;
if (!Info.CheckCallLimit(CallLoc))
return false;
const CXXRecordDecl *RD = Definition->getParent();
if (RD->getNumVBases()) {
Info.Diag(CallLoc, diag::note_constexpr_virtual_base) << RD;
return false;
}
CallStackFrame Frame(Info, CallLoc, Definition, &This, ArgValues.data());
// If it's a delegating constructor, just delegate.
if (Definition->isDelegatingConstructor()) {
CXXConstructorDecl::init_const_iterator I = Definition->init_begin();
{
FullExpressionRAII InitScope(Info);
if (!EvaluateInPlace(Result, Info, This, (*I)->getInit()))
return false;
}
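    // Evaluate the constructor body too; in C++14 it may contain statements.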
return EvaluateStmt(Result, Info, Definition->getBody()) != ESR_Failed;
}
// For a trivial copy or move constructor, perform an APValue copy. This is
// essential for unions (or classes with anonymous union members), where the
// operations performed by the constructor cannot be represented by
// ctor-initializers.
//
// Skip this for empty non-union classes; we should not perform an
// lvalue-to-rvalue conversion on them because their copy constructor does not
// actually read them.
if (Definition->isDefaulted() && Definition->isCopyOrMoveConstructor() &&
(Definition->getParent()->isUnion() ||
(Definition->isTrivial() && hasFields(Definition->getParent())))) {
LValue RHS;
RHS.setFrom(Info.Ctx, ArgValues[0]);
return handleLValueToRValueConversion(Info, Args[0], Args[0]->getType(),
RHS, Result);
}
// Reserve space for the struct members.
if (!RD->isUnion() && Result.isUninit())
Result = APValue(APValue::UninitStruct(), RD->getNumBases(),
std::distance(RD->field_begin(), RD->field_end()));
if (RD->isInvalidDecl()) return false;
const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
// A scope for temporaries lifetime-extended by reference members.
BlockScopeRAII LifetimeExtendedScope(Info);
bool Success = true;
unsigned BasesSeen = 0;
#ifndef NDEBUG
CXXRecordDecl::base_class_const_iterator BaseIt = RD->bases_begin();
#endif
for (const auto *I : Definition->inits()) {
LValue Subobject = This;
APValue *Value = &Result;
// Determine the subobject to initialize.
FieldDecl *FD = nullptr;
if (I->isBaseInitializer()) {
QualType BaseType(I->getBaseClass(), 0);
#ifndef NDEBUG
      // Non-virtual base classes are initialized in the order in which they
      // appear in the class definition. We have already checked for virtual
      // base classes.
assert(!BaseIt->isVirtual() && "virtual base for literal type");
assert(Info.Ctx.hasSameType(BaseIt->getType(), BaseType) &&
"base class initializers not in expected order");
++BaseIt;
#endif
if (!HandleLValueDirectBase(Info, I->getInit(), Subobject, RD,
BaseType->getAsCXXRecordDecl(), &Layout))
return false;
Value = &Result.getStructBase(BasesSeen++);
} else if ((FD = I->getMember())) {
if (!HandleLValueMember(Info, I->getInit(), Subobject, FD, &Layout))
return false;
if (RD->isUnion()) {
Result = APValue(FD);
Value = &Result.getUnionValue();
} else {
Value = &Result.getStructField(FD->getFieldIndex());
}
} else if (IndirectFieldDecl *IFD = I->getIndirectMember()) {
// Walk the indirect field decl's chain to find the object to initialize,
// and make sure we've initialized every step along it.
for (auto *C : IFD->chain()) {
FD = cast<FieldDecl>(C);
CXXRecordDecl *CD = cast<CXXRecordDecl>(FD->getParent());
// Switch the union field if it differs. This happens if we had
// preceding zero-initialization, and we're now initializing a union
// subobject other than the first.
// FIXME: In this case, the values of the other subobjects are
// specified, since zero-initialization sets all padding bits to zero.
if (Value->isUninit() ||
(Value->isUnion() && Value->getUnionField() != FD)) {
if (CD->isUnion())
*Value = APValue(FD);
else
*Value = APValue(APValue::UninitStruct(), CD->getNumBases(),
std::distance(CD->field_begin(), CD->field_end()));
}
if (!HandleLValueMember(Info, I->getInit(), Subobject, FD))
return false;
if (CD->isUnion())
Value = &Value->getUnionValue();
else
Value = &Value->getStructField(FD->getFieldIndex());
}
} else {
llvm_unreachable("unknown base initializer kind");
}
FullExpressionRAII InitScope(Info);
if (!EvaluateInPlace(*Value, Info, Subobject, I->getInit()) ||
(FD && FD->isBitField() && !truncateBitfieldValue(Info, I->getInit(),
*Value, FD))) {
// If we're checking for a potential constant expression, evaluate all
// initializers even if some of them fail.
if (!Info.keepEvaluatingAfterFailure())
return false;
Success = false;
}
}
return Success &&
EvaluateStmt(Result, Info, Definition->getBody()) != ESR_Failed;
}
//===----------------------------------------------------------------------===//
// Generic Evaluation
//===----------------------------------------------------------------------===//
namespace {
template <class Derived>
class ExprEvaluatorBase
: public ConstStmtVisitor<Derived, bool> {
private:
bool DerivedSuccess(const APValue &V, const Expr *E) {
return static_cast<Derived*>(this)->Success(V, E);
}
bool DerivedZeroInitialization(const Expr *E) {
return static_cast<Derived*>(this)->ZeroInitialization(E);
}
// Check whether a conditional operator with a non-constant condition is a
// potential constant expression. If neither arm is a potential constant
// expression, then the conditional operator is not either.
template<typename ConditionalOperator>
void CheckPotentialConstantConditional(const ConditionalOperator *E) {
assert(Info.checkingPotentialConstantExpression());
// Speculatively evaluate both arms.
{
SmallVector<PartialDiagnosticAt, 8> Diag;
SpeculativeEvaluationRAII Speculate(Info, &Diag);
StmtVisitorTy::Visit(E->getFalseExpr());
if (Diag.empty())
return;
Diag.clear();
StmtVisitorTy::Visit(E->getTrueExpr());
if (Diag.empty())
return;
}
Error(E, diag::note_constexpr_conditional_never_const);
}
template<typename ConditionalOperator>
bool HandleConditionalOperator(const ConditionalOperator *E) {
bool BoolResult;
if (!EvaluateAsBooleanCondition(E->getCond(), BoolResult, Info)) {
if (Info.checkingPotentialConstantExpression())
CheckPotentialConstantConditional(E);
return false;
}
Expr *EvalExpr = BoolResult ? E->getTrueExpr() : E->getFalseExpr();
return StmtVisitorTy::Visit(EvalExpr);
}
protected:
EvalInfo &Info;
typedef ConstStmtVisitor<Derived, bool> StmtVisitorTy;
typedef ExprEvaluatorBase ExprEvaluatorBaseTy;
OptionalDiagnostic CCEDiag(const Expr *E, diag::kind D) {
return Info.CCEDiag(E, D);
}
bool ZeroInitialization(const Expr *E) { return Error(E); }
public:
ExprEvaluatorBase(EvalInfo &Info) : Info(Info) {}
EvalInfo &getEvalInfo() { return Info; }
/// Report an evaluation error. This should only be called when an error is
/// first discovered. When propagating an error, just return false.
bool Error(const Expr *E, diag::kind D) {
Info.Diag(E, D);
return false;
}
bool Error(const Expr *E) {
return Error(E, diag::note_invalid_subexpr_in_const_expr);
}
bool VisitStmt(const Stmt *) {
llvm_unreachable("Expression evaluator should not be called on stmts");
}
bool VisitExpr(const Expr *E) {
return Error(E);
}
bool VisitParenExpr(const ParenExpr *E)
{ return StmtVisitorTy::Visit(E->getSubExpr()); }
bool VisitUnaryExtension(const UnaryOperator *E)
{ return StmtVisitorTy::Visit(E->getSubExpr()); }
bool VisitUnaryPlus(const UnaryOperator *E)
{ return StmtVisitorTy::Visit(E->getSubExpr()); }
bool VisitChooseExpr(const ChooseExpr *E)
{ return StmtVisitorTy::Visit(E->getChosenSubExpr()); }
bool VisitGenericSelectionExpr(const GenericSelectionExpr *E)
{ return StmtVisitorTy::Visit(E->getResultExpr()); }
bool VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *E)
{ return StmtVisitorTy::Visit(E->getReplacement()); }
bool VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *E)
{ return StmtVisitorTy::Visit(E->getExpr()); }
bool VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *E) {
// The initializer may not have been parsed yet, or might be erroneous.
if (!E->getExpr())
return Error(E);
return StmtVisitorTy::Visit(E->getExpr());
}
// We cannot create any objects for which cleanups are required, so there is
// nothing to do here; all cleanups must come from unevaluated subexpressions.
bool VisitExprWithCleanups(const ExprWithCleanups *E)
{ return StmtVisitorTy::Visit(E->getSubExpr()); }
bool VisitCXXReinterpretCastExpr(const CXXReinterpretCastExpr *E) {
CCEDiag(E, diag::note_constexpr_invalid_cast) << 0;
return static_cast<Derived*>(this)->VisitCastExpr(E);
}
bool VisitCXXDynamicCastExpr(const CXXDynamicCastExpr *E) {
CCEDiag(E, diag::note_constexpr_invalid_cast) << 1;
return static_cast<Derived*>(this)->VisitCastExpr(E);
}
bool VisitBinaryOperator(const BinaryOperator *E) {
switch (E->getOpcode()) {
default:
return Error(E);
case BO_Comma:
VisitIgnoredValue(E->getLHS());
return StmtVisitorTy::Visit(E->getRHS());
case BO_PtrMemD:
case BO_PtrMemI: {
LValue Obj;
if (!HandleMemberPointerAccess(Info, E, Obj))
return false;
APValue Result;
if (!handleLValueToRValueConversion(Info, E, E->getType(), Obj, Result))
return false;
return DerivedSuccess(Result, E);
}
}
}
bool VisitBinaryConditionalOperator(const BinaryConditionalOperator *E) {
// Evaluate and cache the common expression. We treat it as a temporary,
// even though it's not quite the same thing.
if (!Evaluate(Info.CurrentCall->createTemporary(E->getOpaqueValue(), false),
Info, E->getCommon()))
return false;
return HandleConditionalOperator(E);
}
bool VisitConditionalOperator(const ConditionalOperator *E) {
bool IsBcpCall = false;
// If the condition (ignoring parens) is a __builtin_constant_p call,
// the result is a constant expression if it can be folded without
// side-effects. This is an important GNU extension. See GCC PR38377
// for discussion.
if (const CallExpr *CallCE =
dyn_cast<CallExpr>(E->getCond()->IgnoreParenCasts()))
if (CallCE->getBuiltinCallee() == Builtin::BI__builtin_constant_p)
IsBcpCall = true;
// Always assume __builtin_constant_p(...) ? ... : ... is a potential
// constant expression; we can't check whether it's potentially foldable.
if (Info.checkingPotentialConstantExpression() && IsBcpCall)
return false;
FoldConstant Fold(Info, IsBcpCall);
if (!HandleConditionalOperator(E)) {
Fold.keepDiagnostics();
return false;
}
return true;
}
bool VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
if (APValue *Value = Info.CurrentCall->getTemporary(E))
return DerivedSuccess(*Value, E);
const Expr *Source = E->getSourceExpr();
if (!Source)
return Error(E);
if (Source == E) { // sanity checking.
assert(0 && "OpaqueValueExpr recursively refers to itself");
return Error(E);
}
return StmtVisitorTy::Visit(Source);
}
bool VisitCallExpr(const CallExpr *E) {
const Expr *Callee = E->getCallee()->IgnoreParens();
QualType CalleeType = Callee->getType();
const FunctionDecl *FD = nullptr;
LValue *This = nullptr, ThisVal;
auto Args = llvm::makeArrayRef(E->getArgs(), E->getNumArgs());
bool HasQualifier = false;
// Extract function decl and 'this' pointer from the callee.
if (CalleeType->isSpecificBuiltinType(BuiltinType::BoundMember)) {
const ValueDecl *Member = nullptr;
if (const MemberExpr *ME = dyn_cast<MemberExpr>(Callee)) {
// Explicit bound member calls, such as x.f() or p->g();
if (!EvaluateObjectArgument(Info, ME->getBase(), ThisVal))
return false;
Member = ME->getMemberDecl();
This = &ThisVal;
HasQualifier = ME->hasQualifier();
} else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(Callee)) {
// Indirect bound member calls ('.*' or '->*').
Member = HandleMemberPointerAccess(Info, BE, ThisVal, false);
if (!Member) return false;
This = &ThisVal;
} else
return Error(Callee);
FD = dyn_cast<FunctionDecl>(Member);
if (!FD)
return Error(Callee);
} else if (CalleeType->isFunctionPointerType()) {
LValue Call;
if (!EvaluatePointer(Callee, Call, Info))
return false;
if (!Call.getLValueOffset().isZero())
return Error(Callee);
FD = dyn_cast_or_null<FunctionDecl>(
Call.getLValueBase().dyn_cast<const ValueDecl*>());
if (!FD)
return Error(Callee);
// Overloaded operator calls to member functions are represented as normal
// calls with '*this' as the first argument.
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
if (MD && !MD->isStatic()) {
// FIXME: When selecting an implicit conversion for an overloaded
// operator delete, we sometimes try to evaluate calls to conversion
// operators without a 'this' parameter!
if (Args.empty())
return Error(E);
if (!EvaluateObjectArgument(Info, Args[0], ThisVal))
return false;
This = &ThisVal;
Args = Args.slice(1);
}
// Don't call function pointers which have been cast to some other type.
if (!Info.Ctx.hasSameType(CalleeType->getPointeeType(), FD->getType()))
return Error(E);
} else
return Error(E);
if (This && !This->checkSubobject(Info, E, CSK_This))
return false;
// HLSL Changes Start
{
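      // If the callee is an HLSL intrinsic, fold it directly through
      // HandleIntrinsicCall rather than evaluating a function body.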
unsigned opcode;
StringRef group;
if (hlsl::GetIntrinsicOp(FD, opcode, group)) {
APValue opResult;
if (!HandleIntrinsicCall(E->getExprLoc(), opcode, This, Args, Info,
opResult)) {
return false;
}
return DerivedSuccess(opResult, E);
}
}
// HLSL Changes End
// DR1358 allows virtual constexpr functions in some cases. Don't allow
// calls to such functions in constant expressions.
if (This && !HasQualifier &&
isa<CXXMethodDecl>(FD) && cast<CXXMethodDecl>(FD)->isVirtual())
return Error(E, diag::note_constexpr_virtual_call);
const FunctionDecl *Definition = nullptr;
Stmt *Body = FD->getBody(Definition);
APValue Result;
if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition) ||
!HandleFunctionCall(E->getExprLoc(), Definition, This, Args, Body,
Info, Result))
return false;
return DerivedSuccess(Result, E);
}
bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
return StmtVisitorTy::Visit(E->getInitializer());
}
bool VisitInitListExpr(const InitListExpr *E) {
if (E->getNumInits() == 0)
return DerivedZeroInitialization(E);
    // HLSL Change
    if (Info.getLangOpts().HLSL && !E->getType()->isScalarType() &&
        !IsHLSLVecInitList(E))
      return Error(E);
if (E->getNumInits() == 1)
return StmtVisitorTy::Visit(E->getInit(0));
return Error(E);
}
bool VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
return DerivedZeroInitialization(E);
}
bool VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
return DerivedZeroInitialization(E);
}
bool VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
return DerivedZeroInitialization(E);
}
/// A member expression where the object is a prvalue is itself a prvalue.
bool VisitMemberExpr(const MemberExpr *E) {
assert(!E->isArrow() && "missing call to bound member function?");
APValue Val;
if (!Evaluate(Val, Info, E->getBase()))
return false;
QualType BaseTy = E->getBase()->getType();
const FieldDecl *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
if (!FD) return Error(E);
assert(!FD->getType()->isReferenceType() && "prvalue reference?");
assert(BaseTy->castAs<RecordType>()->getDecl()->getCanonicalDecl() ==
FD->getParent()->getCanonicalDecl() && "record / field mismatch");
CompleteObject Obj(&Val, BaseTy);
SubobjectDesignator Designator(BaseTy);
Designator.addDeclUnchecked(FD);
APValue Result;
return extractSubobject(Info, E, Obj, Designator, Result) &&
DerivedSuccess(Result, E);
}
bool VisitCastExpr(const CastExpr *E) {
// HLSL Change Begins
    if (Info.getLangOpts().HLSL) {
      const auto *SubExpr = E->getSubExpr();
      if (SubExpr->getStmtClass() == Stmt::InitListExprClass &&
          !IsHLSLVecInitList(SubExpr) && !SubExpr->getType()->isScalarType())
        return Error(E);
    }
// HLSL Change Ends
switch (E->getCastKind()) {
default:
break;
case CK_AtomicToNonAtomic: {
APValue AtomicVal;
if (!EvaluateAtomic(E->getSubExpr(), AtomicVal, Info))
return false;
return DerivedSuccess(AtomicVal, E);
}
case CK_NoOp:
case CK_UserDefinedConversion:
return StmtVisitorTy::Visit(E->getSubExpr());
case CK_LValueToRValue: {
LValue LVal;
if (!EvaluateLValue(E->getSubExpr(), LVal, Info))
return false;
APValue RVal;
// Note, we use the subexpression's type in order to retain cv-qualifiers.
if (!handleLValueToRValueConversion(Info, E, E->getSubExpr()->getType(),
LVal, RVal))
return false;
return DerivedSuccess(RVal, E);
}
}
return Error(E);
}
bool VisitUnaryPostInc(const UnaryOperator *UO) {
return VisitUnaryPostIncDec(UO);
}
bool VisitUnaryPostDec(const UnaryOperator *UO) {
return VisitUnaryPostIncDec(UO);
}
bool VisitUnaryPostIncDec(const UnaryOperator *UO) {
if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure())
return Error(UO);
LValue LVal;
if (!EvaluateLValue(UO->getSubExpr(), LVal, Info))
return false;
APValue RVal;
if (!handleIncDec(this->Info, UO, LVal, UO->getSubExpr()->getType(),
UO->isIncrementOp(), &RVal))
return false;
return DerivedSuccess(RVal, UO);
}
bool VisitStmtExpr(const StmtExpr *E) {
// We will have checked the full-expressions inside the statement expression
// when they were completed, and don't need to check them again now.
if (Info.checkingForOverflow())
return Error(E);
BlockScopeRAII Scope(Info);
const CompoundStmt *CS = E->getSubStmt();
if (CS->body_empty())
return true;
for (CompoundStmt::const_body_iterator BI = CS->body_begin(),
BE = CS->body_end();
/**/; ++BI) {
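      // The value of a GNU statement expression comes from its final
      // statement, which must be an expression.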
if (BI + 1 == BE) {
const Expr *FinalExpr = dyn_cast<Expr>(*BI);
if (!FinalExpr) {
Info.Diag((*BI)->getLocStart(),
diag::note_constexpr_stmt_expr_unsupported);
return false;
}
return this->Visit(FinalExpr);
}
APValue ReturnValue;
EvalStmtResult ESR = EvaluateStmt(ReturnValue, Info, *BI);
if (ESR != ESR_Succeeded) {
// FIXME: If the statement-expression terminated due to 'return',
// 'break', or 'continue', it would be nice to propagate that to
// the outer statement evaluation rather than bailing out.
if (ESR != ESR_Failed)
Info.Diag((*BI)->getLocStart(),
diag::note_constexpr_stmt_expr_unsupported);
return false;
}
}
    llvm_unreachable("the loop above should have returned");
}
/// Visit a value which is evaluated, but whose value is ignored.
void VisitIgnoredValue(const Expr *E) {
EvaluateIgnoredValue(Info, E);
}
};
}
//===----------------------------------------------------------------------===//
// Common base class for lvalue and temporary evaluation.
//===----------------------------------------------------------------------===//
namespace {
template<class Derived>
class LValueExprEvaluatorBase
: public ExprEvaluatorBase<Derived> {
protected:
LValue &Result;
typedef LValueExprEvaluatorBase LValueExprEvaluatorBaseTy;
typedef ExprEvaluatorBase<Derived> ExprEvaluatorBaseTy;
bool Success(APValue::LValueBase B) {
Result.set(B);
return true;
}
public:
LValueExprEvaluatorBase(EvalInfo &Info, LValue &Result) :
ExprEvaluatorBaseTy(Info), Result(Result) {}
bool Success(const APValue &V, const Expr *E) {
Result.setFrom(this->Info.Ctx, V);
return true;
}
bool VisitMemberExpr(const MemberExpr *E) {
// Handle non-static data members.
QualType BaseTy;
if (E->isArrow()) {
if (!EvaluatePointer(E->getBase(), Result, this->Info))
return false;
BaseTy = E->getBase()->getType()->castAs<PointerType>()->getPointeeType();
} else if (E->getBase()->isRValue()) {
assert(E->getBase()->getType()->isRecordType());
if (!EvaluateTemporary(E->getBase(), Result, this->Info))
return false;
BaseTy = E->getBase()->getType();
} else {
if (!this->Visit(E->getBase()))
return false;
BaseTy = E->getBase()->getType();
}
const ValueDecl *MD = E->getMemberDecl();
if (const FieldDecl *FD = dyn_cast<FieldDecl>(E->getMemberDecl())) {
assert(BaseTy->getAs<RecordType>()->getDecl()->getCanonicalDecl() ==
FD->getParent()->getCanonicalDecl() && "record / field mismatch");
(void)BaseTy;
if (!HandleLValueMember(this->Info, E, Result, FD))
return false;
} else if (const IndirectFieldDecl *IFD = dyn_cast<IndirectFieldDecl>(MD)) {
if (!HandleLValueIndirectMember(this->Info, E, Result, IFD))
return false;
} else
return this->Error(E);
if (MD->getType()->isReferenceType()) {
APValue RefValue;
if (!handleLValueToRValueConversion(this->Info, E, MD->getType(), Result,
RefValue))
return false;
return Success(RefValue, E);
}
return true;
}
bool VisitBinaryOperator(const BinaryOperator *E) {
switch (E->getOpcode()) {
default:
return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
case BO_PtrMemD:
case BO_PtrMemI:
return HandleMemberPointerAccess(this->Info, E, Result);
}
}
bool VisitCastExpr(const CastExpr *E) {
switch (E->getCastKind()) {
default:
return ExprEvaluatorBaseTy::VisitCastExpr(E);
case CK_DerivedToBase:
case CK_UncheckedDerivedToBase:
if (!this->Visit(E->getSubExpr()))
return false;
// Now figure out the necessary offset to add to the base LV to get from
// the derived class to the base class.
return HandleLValueBasePath(this->Info, E, E->getSubExpr()->getType(),
Result);
}
}
};
}
//===----------------------------------------------------------------------===//
// LValue Evaluation
//
// This is used for evaluating lvalues (in C and C++), xvalues (in C++11),
// function designators (in C), decl references to void objects (in C), and
// temporaries (if building with -Wno-address-of-temporary).
//
// LValue evaluation produces values comprising a base expression of one of the
// following types:
// - Declarations
// * VarDecl
// * FunctionDecl
// - Literals
// * CompoundLiteralExpr in C
// * StringLiteral
// * CXXTypeidExpr
// * PredefinedExpr
// * ObjCStringLiteralExpr
// * ObjCEncodeExpr
// * AddrLabelExpr
// * BlockExpr
// * CallExpr for a MakeStringConstant builtin
// - Locals and temporaries
// * MaterializeTemporaryExpr
// * Any Expr, with a CallIndex indicating the function in which the temporary
// was evaluated, for cases where the MaterializeTemporaryExpr is missing
// from the AST (FIXME).
// * A MaterializeTemporaryExpr that has static storage duration, with no
// CallIndex, for a lifetime-extended temporary.
// plus an offset in bytes.
//===----------------------------------------------------------------------===//
namespace {
class LValueExprEvaluator
: public LValueExprEvaluatorBase<LValueExprEvaluator> {
public:
LValueExprEvaluator(EvalInfo &Info, LValue &Result) :
LValueExprEvaluatorBaseTy(Info, Result) {}
bool VisitVarDecl(const Expr *E, const VarDecl *VD);
bool VisitUnaryPreIncDec(const UnaryOperator *UO);
bool VisitDeclRefExpr(const DeclRefExpr *E);
bool VisitPredefinedExpr(const PredefinedExpr *E) { return Success(E); }
bool VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E);
bool VisitMemberExpr(const MemberExpr *E);
bool VisitStringLiteral(const StringLiteral *E) { return Success(E); }
bool VisitObjCEncodeExpr(const ObjCEncodeExpr *E) { return Success(E); }
bool VisitCXXTypeidExpr(const CXXTypeidExpr *E);
bool VisitCXXUuidofExpr(const CXXUuidofExpr *E);
bool VisitArraySubscriptExpr(const ArraySubscriptExpr *E);
bool VisitUnaryDeref(const UnaryOperator *E);
bool VisitUnaryReal(const UnaryOperator *E);
bool VisitUnaryImag(const UnaryOperator *E);
bool VisitUnaryPreInc(const UnaryOperator *UO) {
return VisitUnaryPreIncDec(UO);
}
bool VisitUnaryPreDec(const UnaryOperator *UO) {
return VisitUnaryPreIncDec(UO);
}
bool VisitBinAssign(const BinaryOperator *BO);
bool VisitCompoundAssignOperator(const CompoundAssignOperator *CAO);
bool VisitCastExpr(const CastExpr *E) {
switch (E->getCastKind()) {
default:
return LValueExprEvaluatorBaseTy::VisitCastExpr(E);
case CK_LValueBitCast:
this->CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
if (!Visit(E->getSubExpr()))
return false;
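      // The bitcast defeats subobject tracking; invalidate the designator.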
Result.Designator.setInvalid();
return true;
case CK_BaseToDerived:
if (!Visit(E->getSubExpr()))
return false;
return HandleBaseToDerivedCast(Info, E, Result);
}
}
};
} // end anonymous namespace
/// Evaluate an expression as an lvalue. This can be legitimately called on
/// expressions which are not glvalues, in two cases:
/// * function designators in C, and
/// * "extern void" objects
static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info) {
assert(E->isGLValue() || E->getType()->isFunctionType() ||
E->getType()->isVoidType());
return LValueExprEvaluator(Info, Result).Visit(E);
}
bool LValueExprEvaluator::VisitDeclRefExpr(const DeclRefExpr *E) {
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(E->getDecl()))
return Success(FD);
if (const VarDecl *VD = dyn_cast<VarDecl>(E->getDecl()))
return VisitVarDecl(E, VD);
return Error(E);
}
bool LValueExprEvaluator::VisitVarDecl(const Expr *E, const VarDecl *VD) {
CallStackFrame *Frame = nullptr;
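  // Frame index 1 is the bottom of the evaluation stack, not a real call;
  // only locals inside an active call frame get frame-relative lvalues.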
if (VD->hasLocalStorage() && Info.CurrentCall->Index > 1)
Frame = Info.CurrentCall;
if (!VD->getType()->isReferenceType()) {
if (Frame) {
Result.set(VD, Frame->Index);
return true;
}
return Success(VD);
}
APValue *V;
if (!evaluateVarDeclInit(Info, E, VD, Frame, V))
return false;
if (V->isUninit()) {
if (!Info.checkingPotentialConstantExpression())
Info.Diag(E, diag::note_constexpr_use_uninit_reference);
return false;
}
return Success(*V, E);
}
bool LValueExprEvaluator::VisitMaterializeTemporaryExpr(
const MaterializeTemporaryExpr *E) {
// Walk through the expression to find the materialized temporary itself.
SmallVector<const Expr *, 2> CommaLHSs;
SmallVector<SubobjectAdjustment, 2> Adjustments;
const Expr *Inner = E->GetTemporaryExpr()->
skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
// If we passed any comma operators, evaluate their LHSs.
for (unsigned I = 0, N = CommaLHSs.size(); I != N; ++I)
if (!EvaluateIgnoredValue(Info, CommaLHSs[I]))
return false;
// A materialized temporary with static storage duration can appear within the
// result of a constant expression evaluation, so we need to preserve its
// value for use outside this evaluation.
APValue *Value;
if (E->getStorageDuration() == SD_Static) {
Value = Info.Ctx.getMaterializedTemporaryValue(E, true);
*Value = APValue();
Result.set(E);
} else {
Value = &Info.CurrentCall->
createTemporary(E, E->getStorageDuration() == SD_Automatic);
Result.set(E, Info.CurrentCall->Index);
}
QualType Type = Inner->getType();
// Materialize the temporary itself.
if (!EvaluateInPlace(*Value, Info, Result, Inner) ||
(E->getStorageDuration() == SD_Static &&
!CheckConstantExpression(Info, E->getExprLoc(), Type, *Value))) {
*Value = APValue();
return false;
}
// Adjust our lvalue to refer to the desired subobject.
for (unsigned I = Adjustments.size(); I != 0; /**/) {
--I;
switch (Adjustments[I].Kind) {
case SubobjectAdjustment::DerivedToBaseAdjustment:
if (!HandleLValueBasePath(Info, Adjustments[I].DerivedToBase.BasePath,
Type, Result))
return false;
Type = Adjustments[I].DerivedToBase.BasePath->getType();
break;
case SubobjectAdjustment::FieldAdjustment:
if (!HandleLValueMember(Info, E, Result, Adjustments[I].Field))
return false;
Type = Adjustments[I].Field->getType();
break;
case SubobjectAdjustment::MemberPointerAdjustment:
if (!HandleMemberPointerAccess(this->Info, Type, Result,
Adjustments[I].Ptr.RHS))
return false;
Type = Adjustments[I].Ptr.MPT->getPointeeType();
break;
}
}
return true;
}
bool
LValueExprEvaluator::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
assert(!Info.getLangOpts().CPlusPlus && "lvalue compound literal in c++?");
// Defer visiting the literal until the lvalue-to-rvalue conversion. We can
// only see this when folding in C, so there's no standard to follow here.
return Success(E);
}
bool LValueExprEvaluator::VisitCXXTypeidExpr(const CXXTypeidExpr *E) {
if (!E->isPotentiallyEvaluated())
return Success(E);
Info.Diag(E, diag::note_constexpr_typeid_polymorphic)
<< E->getExprOperand()->getType()
<< E->getExprOperand()->getSourceRange();
return false;
}
bool LValueExprEvaluator::VisitCXXUuidofExpr(const CXXUuidofExpr *E) {
return Success(E);
}
bool LValueExprEvaluator::VisitMemberExpr(const MemberExpr *E) {
// Handle static data members.
if (const VarDecl *VD = dyn_cast<VarDecl>(E->getMemberDecl())) {
VisitIgnoredValue(E->getBase());
return VisitVarDecl(E, VD);
}
// Handle static member functions.
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(E->getMemberDecl())) {
if (MD->isStatic()) {
VisitIgnoredValue(E->getBase());
return Success(MD);
}
}
// Handle non-static data members.
return LValueExprEvaluatorBaseTy::VisitMemberExpr(E);
}
bool LValueExprEvaluator::VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// FIXME: Deal with vectors as array subscript bases.
if (E->getBase()->getType()->isVectorType())
return Error(E);
  if (this->getEvalInfo().Ctx.getLangOpts().HLSL) // HLSL Change
    return Error(E); // TODO: handle arrays
  if (!EvaluatePointer(E->getBase(), Result, Info))
    return false;
APSInt Index;
if (!EvaluateInteger(E->getIdx(), Index, Info))
return false;
return HandleLValueArrayAdjustment(Info, E, Result, E->getType(),
getExtValue(Index));
}
bool LValueExprEvaluator::VisitUnaryDeref(const UnaryOperator *E) {
return EvaluatePointer(E->getSubExpr(), Result, Info);
}
bool LValueExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
if (!Visit(E->getSubExpr()))
return false;
// __real is a no-op on scalar lvalues.
if (E->getSubExpr()->getType()->isAnyComplexType())
HandleLValueComplexElement(Info, E, Result, E->getType(), false);
return true;
}
bool LValueExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
assert(E->getSubExpr()->getType()->isAnyComplexType() &&
"lvalue __imag__ on scalar?");
if (!Visit(E->getSubExpr()))
return false;
HandleLValueComplexElement(Info, E, Result, E->getType(), true);
return true;
}
bool LValueExprEvaluator::VisitUnaryPreIncDec(const UnaryOperator *UO) {
if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure())
return Error(UO);
if (!this->Visit(UO->getSubExpr()))
return false;
return handleIncDec(
this->Info, UO, Result, UO->getSubExpr()->getType(),
UO->isIncrementOp(), nullptr);
}
bool LValueExprEvaluator::VisitCompoundAssignOperator(
const CompoundAssignOperator *CAO) {
if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure())
return Error(CAO);
APValue RHS;
// The overall lvalue result is the result of evaluating the LHS.
if (!this->Visit(CAO->getLHS())) {
if (Info.keepEvaluatingAfterFailure())
Evaluate(RHS, this->Info, CAO->getRHS());
return false;
}
if (!Evaluate(RHS, this->Info, CAO->getRHS()))
return false;
return handleCompoundAssignment(
this->Info, CAO,
Result, CAO->getLHS()->getType(), CAO->getComputationLHSType(),
CAO->getOpForCompoundAssignment(CAO->getOpcode()), RHS);
}
bool LValueExprEvaluator::VisitBinAssign(const BinaryOperator *E) {
if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure())
return Error(E);
APValue NewVal;
if (!this->Visit(E->getLHS())) {
if (Info.keepEvaluatingAfterFailure())
Evaluate(NewVal, this->Info, E->getRHS());
return false;
}
if (!Evaluate(NewVal, this->Info, E->getRHS()))
return false;
return handleAssignment(this->Info, E, Result, E->getLHS()->getType(),
NewVal);
}
//===----------------------------------------------------------------------===//
// Pointer Evaluation
//===----------------------------------------------------------------------===//
namespace {
class PointerExprEvaluator
: public ExprEvaluatorBase<PointerExprEvaluator> {
LValue &Result;
bool Success(const Expr *E) {
Result.set(E);
return true;
}
public:
PointerExprEvaluator(EvalInfo &info, LValue &Result)
: ExprEvaluatorBaseTy(info), Result(Result) {}
bool Success(const APValue &V, const Expr *E) {
Result.setFrom(Info.Ctx, V);
return true;
}
bool ZeroInitialization(const Expr *E) {
return Success((Expr*)nullptr);
}
bool VisitBinaryOperator(const BinaryOperator *E);
bool VisitCastExpr(const CastExpr* E);
bool VisitUnaryAddrOf(const UnaryOperator *E);
bool VisitObjCStringLiteral(const ObjCStringLiteral *E)
{ return Success(E); }
bool VisitObjCBoxedExpr(const ObjCBoxedExpr *E)
{ return Success(E); }
bool VisitAddrLabelExpr(const AddrLabelExpr *E)
{ return Success(E); }
bool VisitCallExpr(const CallExpr *E);
bool VisitBlockExpr(const BlockExpr *E) {
if (!E->getBlockDecl()->hasCaptures())
return Success(E);
return Error(E);
}
bool VisitCXXThisExpr(const CXXThisExpr *E) {
// Can't look at 'this' when checking a potential constant expression.
if (Info.checkingPotentialConstantExpression())
return false;
if (!Info.CurrentCall->This) {
if (Info.getLangOpts().CPlusPlus11)
Info.Diag(E, diag::note_constexpr_this) << E->isImplicit();
else
Info.Diag(E);
return false;
}
Result = *Info.CurrentCall->This;
return true;
}
// FIXME: Missing: @protocol, @selector
};
} // end anonymous namespace
static bool EvaluatePointer(const Expr* E, LValue& Result, EvalInfo &Info) {
assert(E->isRValue() && E->getType()->hasPointerRepresentation());
return PointerExprEvaluator(Info, Result).Visit(E);
}
bool PointerExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
if (E->getOpcode() != BO_Add &&
E->getOpcode() != BO_Sub)
return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
const Expr *PExp = E->getLHS();
const Expr *IExp = E->getRHS();
if (IExp->getType()->isPointerType())
std::swap(PExp, IExp);
bool EvalPtrOK = EvaluatePointer(PExp, Result, Info);
if (!EvalPtrOK && !Info.keepEvaluatingAfterFailure())
return false;
llvm::APSInt Offset;
if (!EvaluateInteger(IExp, Offset, Info) || !EvalPtrOK)
return false;
int64_t AdditionalOffset = getExtValue(Offset);
if (E->getOpcode() == BO_Sub)
AdditionalOffset = -AdditionalOffset;
QualType Pointee = PExp->getType()->castAs<PointerType>()->getPointeeType();
return HandleLValueArrayAdjustment(Info, E, Result, Pointee,
AdditionalOffset);
}
bool PointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
return EvaluateLValue(E->getSubExpr(), Result, Info);
}
bool PointerExprEvaluator::VisitCastExpr(const CastExpr* E) {
const Expr* SubExpr = E->getSubExpr();
switch (E->getCastKind()) {
default:
break;
case CK_BitCast:
case CK_CPointerToObjCPointerCast:
case CK_BlockPointerToObjCPointerCast:
case CK_AnyPointerToBlockPointerCast:
case CK_AddressSpaceConversion:
if (!Visit(SubExpr))
return false;
// Bitcasts to cv void* are static_casts, not reinterpret_casts, so are
// permitted in constant expressions in C++11. Bitcasts from cv void* are
// also static_casts, but we disallow them as a resolution to DR1312.
if (!E->getType()->isVoidPointerType()) {
Result.Designator.setInvalid();
if (SubExpr->getType()->isVoidPointerType())
CCEDiag(E, diag::note_constexpr_invalid_cast)
<< 3 << SubExpr->getType();
else
CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
}
return true;
case CK_DerivedToBase:
case CK_UncheckedDerivedToBase:
if (!EvaluatePointer(E->getSubExpr(), Result, Info))
return false;
if (!Result.Base && Result.Offset.isZero())
return true;
// Now figure out the necessary offset to add to the base LV to get from
// the derived class to the base class.
return HandleLValueBasePath(Info, E, E->getSubExpr()->getType()->
castAs<PointerType>()->getPointeeType(),
Result);
case CK_BaseToDerived:
if (!Visit(E->getSubExpr()))
return false;
if (!Result.Base && Result.Offset.isZero())
return true;
return HandleBaseToDerivedCast(Info, E, Result);
case CK_NullToPointer:
VisitIgnoredValue(E->getSubExpr());
return ZeroInitialization(E);
case CK_IntegralToPointer: {
CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
APValue Value;
if (!EvaluateIntegerOrLValue(SubExpr, Value, Info))
break;
if (Value.isInt()) {
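      // Fold the integer to an absolute address with no base object.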
unsigned Size = Info.Ctx.getTypeSize(E->getType());
uint64_t N = Value.getInt().extOrTrunc(Size).getZExtValue();
Result.Base = (Expr*)nullptr;
Result.Offset = CharUnits::fromQuantity(N);
Result.CallIndex = 0;
Result.Designator.setInvalid();
return true;
} else {
// Cast is of an lvalue, no need to change value.
Result.setFrom(Info.Ctx, Value);
return true;
}
}
case CK_ArrayToPointerDecay:
if (SubExpr->isGLValue()) {
if (!EvaluateLValue(SubExpr, Result, Info))
return false;
} else {
Result.set(SubExpr, Info.CurrentCall->Index);
if (!EvaluateInPlace(Info.CurrentCall->createTemporary(SubExpr, false),
Info, Result, SubExpr))
return false;
}
// The result is a pointer to the first element of the array.
if (const ConstantArrayType *CAT
= Info.Ctx.getAsConstantArrayType(SubExpr->getType()))
Result.addArray(Info, E, CAT);
else
Result.Designator.setInvalid();
return true;
case CK_FunctionToPointerDecay:
return EvaluateLValue(SubExpr, Result, Info);
}
return ExprEvaluatorBaseTy::VisitCastExpr(E);
}
static CharUnits GetAlignOfType(EvalInfo &Info, QualType T) {
// C++ [expr.alignof]p3:
// When alignof is applied to a reference type, the result is the
// alignment of the referenced type.
if (const ReferenceType *Ref = T->getAs<ReferenceType>())
T = Ref->getPointeeType();
// __alignof is defined to return the preferred alignment.
return Info.Ctx.toCharUnitsFromBits(
Info.Ctx.getPreferredTypeAlign(T.getTypePtr()));
}
static CharUnits GetAlignOfExpr(EvalInfo &Info, const Expr *E) {
E = E->IgnoreParens();
// The kinds of expressions that we have special-case logic here for
// should be kept up to date with the special checks for those
// expressions in Sema.
// alignof decl is always accepted, even if it doesn't make sense: we default
// to 1 in those cases.
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
return Info.Ctx.getDeclAlign(DRE->getDecl(),
/*RefAsPointee*/true);
if (const MemberExpr *ME = dyn_cast<MemberExpr>(E))
return Info.Ctx.getDeclAlign(ME->getMemberDecl(),
/*RefAsPointee*/true);
return GetAlignOfType(Info, E->getType());
}
bool PointerExprEvaluator::VisitCallExpr(const CallExpr *E) {
if (IsStringLiteralCall(E))
return Success(E);
switch (E->getBuiltinCallee()) {
case Builtin::BI__builtin_addressof:
return EvaluateLValue(E->getArg(0), Result, Info);
case Builtin::BI__builtin_assume_aligned: {
    // We need to be very careful here: if the pointer does not have the
    // asserted alignment, the behavior is undefined, and undefined behavior
    // is non-constant.
if (!EvaluatePointer(E->getArg(0), Result, Info))
return false;
LValue OffsetResult(Result);
APSInt Alignment;
if (!EvaluateInteger(E->getArg(1), Alignment, Info))
return false;
CharUnits Align = CharUnits::fromQuantity(getExtValue(Alignment));
if (E->getNumArgs() > 2) {
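      // The three-argument form asserts that (Arg0 - Offset) has the given
      // alignment, so back the offset out before checking.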
APSInt Offset;
if (!EvaluateInteger(E->getArg(2), Offset, Info))
return false;
int64_t AdditionalOffset = -getExtValue(Offset);
OffsetResult.Offset += CharUnits::fromQuantity(AdditionalOffset);
}
// If there is a base object, then it must have the correct alignment.
if (OffsetResult.Base) {
CharUnits BaseAlignment;
if (const ValueDecl *VD =
OffsetResult.Base.dyn_cast<const ValueDecl*>()) {
BaseAlignment = Info.Ctx.getDeclAlign(VD);
} else {
BaseAlignment =
GetAlignOfExpr(Info, OffsetResult.Base.get<const Expr*>());
}
if (BaseAlignment < Align) {
Result.Designator.setInvalid();
// FIXME: Quantities here cast to integers because the plural modifier
// does not work on APSInts yet.
CCEDiag(E->getArg(0),
diag::note_constexpr_baa_insufficient_alignment) << 0
<< (int) BaseAlignment.getQuantity()
<< (unsigned) getExtValue(Alignment);
return false;
}
}
// The offset must also have the correct alignment.
if (OffsetResult.Offset.RoundUpToAlignment(Align) != OffsetResult.Offset) {
Result.Designator.setInvalid();
APSInt Offset(64, false);
Offset = OffsetResult.Offset.getQuantity();
if (OffsetResult.Base)
CCEDiag(E->getArg(0),
diag::note_constexpr_baa_insufficient_alignment) << 1
<< (int) getExtValue(Offset) << (unsigned) getExtValue(Alignment);
else
CCEDiag(E->getArg(0),
diag::note_constexpr_baa_value_insufficient_alignment)
<< Offset << (unsigned) getExtValue(Alignment);
return false;
}
return true;
}
default:
return ExprEvaluatorBaseTy::VisitCallExpr(E);
}
}
//===----------------------------------------------------------------------===//
// Member Pointer Evaluation
//===----------------------------------------------------------------------===//
namespace {
class MemberPointerExprEvaluator
: public ExprEvaluatorBase<MemberPointerExprEvaluator> {
MemberPtr &Result;
bool Success(const ValueDecl *D) {
Result = MemberPtr(D);
return true;
}
public:
MemberPointerExprEvaluator(EvalInfo &Info, MemberPtr &Result)
: ExprEvaluatorBaseTy(Info), Result(Result) {}
bool Success(const APValue &V, const Expr *E) {
Result.setFrom(V);
return true;
}
bool ZeroInitialization(const Expr *E) {
return Success((const ValueDecl*)nullptr);
}
bool VisitCastExpr(const CastExpr *E);
bool VisitUnaryAddrOf(const UnaryOperator *E);
};
} // end anonymous namespace
static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result,
EvalInfo &Info) {
assert(E->isRValue() && E->getType()->isMemberPointerType());
return MemberPointerExprEvaluator(Info, Result).Visit(E);
}
bool MemberPointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
switch (E->getCastKind()) {
default:
return ExprEvaluatorBaseTy::VisitCastExpr(E);
case CK_NullToMemberPointer:
VisitIgnoredValue(E->getSubExpr());
return ZeroInitialization(E);
case CK_BaseToDerivedMemberPointer: {
if (!Visit(E->getSubExpr()))
return false;
if (E->path_empty())
return true;
// Base-to-derived member pointer casts store the path in derived-to-base
// order, so iterate backwards. The CXXBaseSpecifier also provides us with
// the wrong end of the derived->base arc, so stagger the path by one class.
typedef std::reverse_iterator<CastExpr::path_const_iterator> ReverseIter;
for (ReverseIter PathI(E->path_end() - 1), PathE(E->path_begin());
PathI != PathE; ++PathI) {
assert(!(*PathI)->isVirtual() && "memptr cast through vbase");
const CXXRecordDecl *Derived = (*PathI)->getType()->getAsCXXRecordDecl();
if (!Result.castToDerived(Derived))
return Error(E);
}
const Type *FinalTy = E->getType()->castAs<MemberPointerType>()->getClass();
if (!Result.castToDerived(FinalTy->getAsCXXRecordDecl()))
return Error(E);
return true;
}
case CK_DerivedToBaseMemberPointer:
if (!Visit(E->getSubExpr()))
return false;
for (CastExpr::path_const_iterator PathI = E->path_begin(),
PathE = E->path_end(); PathI != PathE; ++PathI) {
assert(!(*PathI)->isVirtual() && "memptr cast through vbase");
const CXXRecordDecl *Base = (*PathI)->getType()->getAsCXXRecordDecl();
if (!Result.castToBase(Base))
return Error(E);
}
return true;
}
}
bool MemberPointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
// C++11 [expr.unary.op]p3 has very strict rules on how the address of a
// member can be formed.
return Success(cast<DeclRefExpr>(E->getSubExpr())->getDecl());
}
//===----------------------------------------------------------------------===//
// Record Evaluation
//===----------------------------------------------------------------------===//
namespace {
class RecordExprEvaluator
: public ExprEvaluatorBase<RecordExprEvaluator> {
const LValue &This;
APValue &Result;
public:
RecordExprEvaluator(EvalInfo &info, const LValue &This, APValue &Result)
: ExprEvaluatorBaseTy(info), This(This), Result(Result) {}
bool Success(const APValue &V, const Expr *E) {
Result = V;
return true;
}
bool ZeroInitialization(const Expr *E);
bool VisitCastExpr(const CastExpr *E);
bool VisitInitListExpr(const InitListExpr *E);
bool VisitCXXConstructExpr(const CXXConstructExpr *E);
bool VisitCXXStdInitializerListExpr(const CXXStdInitializerListExpr *E);
};
}
/// Perform zero-initialization on an object of non-union class type.
/// C++11 [dcl.init]p5:
/// To zero-initialize an object or reference of type T means:
/// [...]
/// -- if T is a (possibly cv-qualified) non-union class type,
/// each non-static data member and each base-class subobject is
/// zero-initialized
static bool HandleClassZeroInitialization(EvalInfo &Info, const Expr *E,
const RecordDecl *RD,
const LValue &This, APValue &Result) {
assert(!RD->isUnion() && "Expected non-union class type");
const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD);
Result = APValue(APValue::UninitStruct(), CD ? CD->getNumBases() : 0,
std::distance(RD->field_begin(), RD->field_end()));
if (RD->isInvalidDecl()) return false;
const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
if (CD) {
unsigned Index = 0;
for (CXXRecordDecl::base_class_const_iterator I = CD->bases_begin(),
End = CD->bases_end(); I != End; ++I, ++Index) {
const CXXRecordDecl *Base = I->getType()->getAsCXXRecordDecl();
LValue Subobject = This;
if (!HandleLValueDirectBase(Info, E, Subobject, CD, Base, &Layout))
return false;
if (!HandleClassZeroInitialization(Info, E, Base, Subobject,
Result.getStructBase(Index)))
return false;
}
}
for (const auto *I : RD->fields()) {
// -- if T is a reference type, no initialization is performed.
if (I->getType()->isReferenceType())
continue;
LValue Subobject = This;
if (!HandleLValueMember(Info, E, Subobject, I, &Layout))
return false;
ImplicitValueInitExpr VIE(I->getType());
if (!EvaluateInPlace(
Result.getStructField(I->getFieldIndex()), Info, Subobject, &VIE))
return false;
}
return true;
}
bool RecordExprEvaluator::ZeroInitialization(const Expr *E) {
const RecordDecl *RD = E->getType()->castAs<RecordType>()->getDecl();
if (RD->isInvalidDecl()) return false;
if (RD->isUnion()) {
// C++11 [dcl.init]p5: If T is a (possibly cv-qualified) union type, the
// object's first non-static named data member is zero-initialized
RecordDecl::field_iterator I = RD->field_begin();
if (I == RD->field_end()) {
Result = APValue((const FieldDecl*)nullptr);
return true;
}
LValue Subobject = This;
if (!HandleLValueMember(Info, E, Subobject, *I))
return false;
Result = APValue(*I);
ImplicitValueInitExpr VIE(I->getType());
return EvaluateInPlace(Result.getUnionValue(), Info, Subobject, &VIE);
}
if (isa<CXXRecordDecl>(RD) && cast<CXXRecordDecl>(RD)->getNumVBases()) {
Info.Diag(E, diag::note_constexpr_virtual_base) << RD;
return false;
}
return HandleClassZeroInitialization(Info, E, RD, This, Result);
}
bool RecordExprEvaluator::VisitCastExpr(const CastExpr *E) {
switch (E->getCastKind()) {
default:
return ExprEvaluatorBaseTy::VisitCastExpr(E);
case CK_ConstructorConversion:
return Visit(E->getSubExpr());
case CK_DerivedToBase:
case CK_UncheckedDerivedToBase: {
APValue DerivedObject;
if (!Evaluate(DerivedObject, Info, E->getSubExpr()))
return false;
if (!DerivedObject.isStruct())
return Error(E->getSubExpr());
// Derived-to-base rvalue conversion: just slice off the derived part.
APValue *Value = &DerivedObject;
const CXXRecordDecl *RD = E->getSubExpr()->getType()->getAsCXXRecordDecl();
for (CastExpr::path_const_iterator PathI = E->path_begin(),
PathE = E->path_end(); PathI != PathE; ++PathI) {
assert(!(*PathI)->isVirtual() && "record rvalue with virtual base");
const CXXRecordDecl *Base = (*PathI)->getType()->getAsCXXRecordDecl();
Value = &Value->getStructBase(getBaseIndex(RD, Base));
RD = Base;
}
Result = *Value;
return true;
}
}
}
bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
if (Info.getLangOpts().HLSL) return false; // HLSL Change
const RecordDecl *RD = E->getType()->castAs<RecordType>()->getDecl();
if (RD->isInvalidDecl()) return false;
const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
if (RD->isUnion()) {
const FieldDecl *Field = E->getInitializedFieldInUnion();
Result = APValue(Field);
if (!Field)
return true;
// If the initializer list for a union does not contain any elements, the
// first element of the union is value-initialized.
// FIXME: The element should be initialized from an initializer list.
// Is this difference ever observable for initializer lists which
// we don't build?
ImplicitValueInitExpr VIE(Field->getType());
const Expr *InitExpr = E->getNumInits() ? E->getInit(0) : &VIE;
LValue Subobject = This;
if (!HandleLValueMember(Info, InitExpr, Subobject, Field, &Layout))
return false;
// Temporarily override This, in case there's a CXXDefaultInitExpr in here.
ThisOverrideRAII ThisOverride(*Info.CurrentCall, &This,
isa<CXXDefaultInitExpr>(InitExpr));
return EvaluateInPlace(Result.getUnionValue(), Info, Subobject, InitExpr);
}
assert((!isa<CXXRecordDecl>(RD) || !cast<CXXRecordDecl>(RD)->getNumBases()) &&
"initializer list for class with base classes");
Result = APValue(APValue::UninitStruct(), 0,
std::distance(RD->field_begin(), RD->field_end()));
unsigned ElementNo = 0;
bool Success = true;
for (const auto *Field : RD->fields()) {
// Anonymous bit-fields are not considered members of the class for
// purposes of aggregate initialization.
if (Field->isUnnamedBitfield())
continue;
LValue Subobject = This;
bool HaveInit = ElementNo < E->getNumInits();
// FIXME: Diagnostics here should point to the end of the initializer
// list, not the start.
if (!HandleLValueMember(Info, HaveInit ? E->getInit(ElementNo) : E,
Subobject, Field, &Layout))
return false;
// Perform an implicit value-initialization for members beyond the end of
// the initializer list.
ImplicitValueInitExpr VIE(HaveInit ? Info.Ctx.IntTy : Field->getType());
const Expr *Init = HaveInit ? E->getInit(ElementNo++) : &VIE;
// Temporarily override This, in case there's a CXXDefaultInitExpr in here.
ThisOverrideRAII ThisOverride(*Info.CurrentCall, &This,
isa<CXXDefaultInitExpr>(Init));
APValue &FieldVal = Result.getStructField(Field->getFieldIndex());
if (!EvaluateInPlace(FieldVal, Info, Subobject, Init) ||
(Field->isBitField() && !truncateBitfieldValue(Info, Init,
FieldVal, Field))) {
if (!Info.keepEvaluatingAfterFailure())
return false;
Success = false;
}
}
return Success;
}
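// Evaluate a constructor call for a record. Trivial default constructors are
// short-circuited (value- or default-initialization needs no body), elidable
// copy/move constructors evaluate the source temporary directly, and
// everything else is interpreted via HandleConstructorCall.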
bool RecordExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E) {
const CXXConstructorDecl *FD = E->getConstructor();
if (FD->isInvalidDecl() || FD->getParent()->isInvalidDecl()) return false;
bool ZeroInit = E->requiresZeroInitialization();
if (CheckTrivialDefaultConstructor(Info, E->getExprLoc(), FD, ZeroInit)) {
// If we've already performed zero-initialization, we're already done.
if (!Result.isUninit())
return true;
// We can get here in two different ways:
// 1) We're performing value-initialization, and should zero-initialize
// the object, or
// 2) We're performing default-initialization of an object with a trivial
// constexpr default constructor, in which case we should start the
// lifetimes of all the base subobjects (there can be no data member
// subobjects in this case) per [basic.life]p1.
// Either way, ZeroInitialization is appropriate.
return ZeroInitialization(E);
}
const FunctionDecl *Definition = nullptr;
FD->getBody(Definition);
if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition))
return false;
// Avoid materializing a temporary for an elidable copy/move constructor.
if (E->isElidable() && !ZeroInit)
if (const MaterializeTemporaryExpr *ME
= dyn_cast<MaterializeTemporaryExpr>(E->getArg(0)))
return Visit(ME->GetTemporaryExpr());
if (ZeroInit && !ZeroInitialization(E))
return false;
auto Args = llvm::makeArrayRef(E->getArgs(), E->getNumArgs());
return HandleConstructorCall(E->getExprLoc(), This, Args,
cast<CXXConstructorDecl>(Definition), Info,
Result);
}
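// Build the value of a std::initializer_list object. The code below assumes
// the standard library represents it as a start pointer plus either an end
// pointer or a length, and rejects any other layout.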
bool RecordExprEvaluator::VisitCXXStdInitializerListExpr(
const CXXStdInitializerListExpr *E) {
const ConstantArrayType *ArrayType =
Info.Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
LValue Array;
if (!EvaluateLValue(E->getSubExpr(), Array, Info))
return false;
// Get a pointer to the first element of the array.
Array.addArray(Info, E, ArrayType);
// FIXME: Perform the checks on the field types in SemaInit.
RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
RecordDecl::field_iterator Field = Record->field_begin();
if (Field == Record->field_end())
return Error(E);
// Start pointer.
if (!Field->getType()->isPointerType() ||
!Info.Ctx.hasSameType(Field->getType()->getPointeeType(),
ArrayType->getElementType()))
return Error(E);
// FIXME: What if the initializer_list type has base classes, etc?
Result = APValue(APValue::UninitStruct(), 0, 2);
Array.moveInto(Result.getStructField(0));
if (++Field == Record->field_end())
return Error(E);
if (Field->getType()->isPointerType() &&
Info.Ctx.hasSameType(Field->getType()->getPointeeType(),
ArrayType->getElementType())) {
// End pointer.
if (!HandleLValueArrayAdjustment(Info, E, Array,
ArrayType->getElementType(),
ArrayType->getSize().getZExtValue()))
return false;
Array.moveInto(Result.getStructField(1));
} else if (Info.Ctx.hasSameType(Field->getType(), Info.Ctx.getSizeType()))
// Length.
Result.getStructField(1) = APValue(APSInt(ArrayType->getSize()));
else
return Error(E);
if (++Field != Record->field_end())
return Error(E);
return true;
}
static bool EvaluateRecord(const Expr *E, const LValue &This,
APValue &Result, EvalInfo &Info) {
assert(E->isRValue() && E->getType()->isRecordType() &&
"can't evaluate expression as a record rvalue");
return RecordExprEvaluator(Info, This, Result).Visit(E);
}
//===----------------------------------------------------------------------===//
// Temporary Evaluation
//
// Temporaries are represented in the AST as rvalues, but generally behave like
// lvalues. The full-object of which the temporary is a subobject is implicitly
// materialized so that a reference can bind to it.
//===----------------------------------------------------------------------===//
namespace {
class TemporaryExprEvaluator
: public LValueExprEvaluatorBase<TemporaryExprEvaluator> {
public:
TemporaryExprEvaluator(EvalInfo &Info, LValue &Result) :
LValueExprEvaluatorBaseTy(Info, Result) {}
/// Visit an expression which constructs the value of this temporary.
bool VisitConstructExpr(const Expr *E) {
Result.set(E, Info.CurrentCall->Index);
return EvaluateInPlace(Info.CurrentCall->createTemporary(E, false),
Info, Result, E);
}
bool VisitCastExpr(const CastExpr *E) {
switch (E->getCastKind()) {
default:
return LValueExprEvaluatorBaseTy::VisitCastExpr(E);
case CK_ConstructorConversion:
return VisitConstructExpr(E->getSubExpr());
}
}
bool VisitInitListExpr(const InitListExpr *E) {
if (Info.getLangOpts().HLSL && !IsHLSLVecInitList(E)) return Error(E); // HLSL Change
return VisitConstructExpr(E);
}
bool VisitCXXConstructExpr(const CXXConstructExpr *E) {
return VisitConstructExpr(E);
}
bool VisitCallExpr(const CallExpr *E) {
return VisitConstructExpr(E);
}
bool VisitCXXStdInitializerListExpr(const CXXStdInitializerListExpr *E) {
return VisitConstructExpr(E);
}
};
} // end anonymous namespace
/// Evaluate an expression of record type as a temporary.
static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info) {
assert(E->isRValue() && E->getType()->isRecordType());
return TemporaryExprEvaluator(Info, Result).Visit(E);
}
//===----------------------------------------------------------------------===//
// Vector Evaluation
//===----------------------------------------------------------------------===//
namespace {
class VectorExprEvaluator
: public ExprEvaluatorBase<VectorExprEvaluator> {
APValue &Result;
public:
VectorExprEvaluator(EvalInfo &info, APValue &Result)
: ExprEvaluatorBaseTy(info), Result(Result) {}
bool Success(const ArrayRef<APValue> &V, const Expr *E) {
// HLSL Change Begins.
const VectorType *VT = nullptr;
if (hlsl::IsHLSLVecType(E->getType())) {
VT = hlsl::ConvertHLSLVecMatTypeToExtVectorType(getEvalInfo().Ctx,
E->getType());
} else
VT = E->getType()->castAs<VectorType>();
// HLSL Change Ends.
assert(V.size() == VT->getNumElements());
// FIXME: remove this APValue copy.
Result = APValue(V.data(), V.size());
return true;
}
bool Success(const APValue &V, const Expr *E) {
assert(V.isVector());
Result = V;
return true;
}
bool ZeroInitialization(const Expr *E);
bool VisitUnaryReal(const UnaryOperator *E)
{ return Visit(E->getSubExpr()); }
bool VisitCastExpr(const CastExpr* E);
bool VisitInitListExpr(const InitListExpr *E);
bool VisitHLSLVectorElementExpr(const HLSLVectorElementExpr *E);
bool VisitUnaryImag(const UnaryOperator *E);
// FIXME: Missing: unary -, unary ~, binary add/sub/mul/div,
// binary comparisons, binary and/or/xor,
// shufflevector, ExtVectorElementExpr
};
} // end anonymous namespace
static bool EvaluateVector(const Expr* E, APValue& Result, EvalInfo &Info) {
assert(E->isRValue() &&
(E->getType()->isVectorType() || hlsl::IsHLSLVecType(E->getType())) &&
"not a vector rvalue"); // HLSL Change
return VectorExprEvaluator(Info, Result).Visit(E);
}
bool VectorExprEvaluator::VisitHLSLVectorElementExpr(
const HLSLVectorElementExpr *E) {
SmallVector<APValue, 4> Elts;
const Expr *baseExpr = E->getBase();
// Handle cases where an HLSLVectorElementExpr accesses into a constant vector.
// For example: float4 a = (0.0).xxxx;
if (Evaluate(Result, Info, baseExpr) && !Info.EvalStatus.HasSideEffects &&
Result.getKind() == APValue::ValueKind::Vector) {
hlsl::VectorMemberAccessPositions accessor = E->getEncodedElementAccess();
for (uint32_t i = 0; i < accessor.Count; ++i) {
uint32_t selector;
accessor.GetPosition(i, &selector);
Elts.push_back(Result.getVectorElt(selector));
}
return Success(Elts, E);
}
// TODO: Handle other APValue::ValueKind cases here.
return false;
}
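// Evaluate vector casts. The HLSL cast kinds below apply their scalar
// conversion element-wise to an already-evaluated source vector; splats
// broadcast a scalar, and bitcasts reinterpret the raw bits of the operand.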
bool VectorExprEvaluator::VisitCastExpr(const CastExpr* E) {
// HLSL Change Begins.
const VectorType *VTy;
if (Info.getLangOpts().HLSL && hlsl::IsHLSLVecType(E->getType())) {
VTy = hlsl::ConvertHLSLVecMatTypeToExtVectorType(getEvalInfo().Ctx, E->getType());
} else {
VTy = E->getType()->castAs<VectorType>();
}
// HLSL Change Ends.
unsigned NElts = VTy->getNumElements();
const Expr *SE = E->getSubExpr();
QualType SETy = SE->getType();
switch (E->getCastKind()) {
// HLSL Change Begins.
case CK_HLSLCC_FloatingCast: {
if (!Visit(SE))
return Error(E);
SmallVector<APValue, 4> Elts;
for (uint32_t i = 0; i < Result.getVectorLength(); ++i) {
APValue Elem = Result.getVectorElt(i);
if (!HandleFloatToFloatCast(
Info, E, hlsl::GetHLSLVecElementType(SE->getType()),
hlsl::GetHLSLVecElementType(E->getType()), Elem.getFloat()))
return Error(E);
Elts.push_back(Elem);
}
return Success(Elts, E);
}
case CK_HLSLVectorTruncationCast: {
if (!Visit(SE))
return Error(E);
unsigned destSize = hlsl::IsHLSLVecType(E->getType()) ? hlsl::GetHLSLVecSize(E->getType()) : 1;
unsigned srcSize = Result.getVectorLength();
// Given that this is a vector truncation op, dest size must be
// less than the source size.
if (destSize >= srcSize)
return Error(E);
SmallVector<APValue, 4> Elts;
for (uint32_t i = 0; i < destSize; ++i) {
APValue Elem = Result.getVectorElt(i);
Elts.push_back(Elem);
}
return Success(Elts, E);
}
case CK_HLSLCC_IntegralCast: {
if (!Visit(SE))
return Error(E);
SmallVector<APValue, 4> Elts;
for (uint32_t i = 0; i < Result.getVectorLength(); ++i) {
APValue Elem = Result.getVectorElt(i);
APSInt NewElemInt = HandleIntToIntCast(
Info, E, hlsl::GetHLSLVecElementType(E->getType()),
hlsl::GetHLSLVecElementType(SE->getType()), Elem.getInt());
APValue NewElem(NewElemInt);
Elts.push_back(NewElem);
}
return Success(Elts, E);
}
case CK_HLSLCC_FloatingToBoolean:
case CK_HLSLCC_IntegralToBoolean: {
if (!Visit(SE))
return Error(E);
SmallVector<APValue, 4> Elts;
for (uint32_t i = 0; i < Result.getVectorLength(); ++i) {
APValue Elem = Result.getVectorElt(i);
bool ResultBool;
if (!HandleConversionToBool(Elem, ResultBool))
return Error(E);
// Construct an int with bitwidth 1 to represent a boolean
APSInt ElemBool(/*BitWidth*/ 1);
if (ResultBool) {
// If the conversion to bool is true then set the LSB
ElemBool.setBit(0);
}
APValue NewElem(ElemBool);
Elts.push_back(NewElem);
}
return Success(Elts, E);
}
case CK_HLSLCC_IntegralToFloating: {
if (!Visit(SE))
return Error(E);
SmallVector<APValue, 4> Elts;
for (uint32_t i = 0; i < Result.getVectorLength(); ++i) {
APFloat ElemFloat(0.0);
if (!HandleIntToFloatCast(Info, E,
hlsl::GetHLSLVecElementType(SE->getType()),
Result.getVectorElt(i).getInt(),
hlsl::GetHLSLVecElementType(E->getType()),
ElemFloat))
return Error(E);
Elts.push_back(APValue(ElemFloat));
}
return Success(Elts, E);
}
case CK_HLSLCC_FloatingToIntegral: {
if (!Visit(SE))
return Error(E);
SmallVector<APValue, 4> Elts;
for (uint32_t i = 0; i < Result.getVectorLength(); ++i) {
APSInt ElemInt;
if (!HandleFloatToIntCast(Info, E,
hlsl::GetHLSLVecElementType(SE->getType()),
Result.getVectorElt(i).getFloat(),
hlsl::GetHLSLVecElementType(E->getType()), ElemInt))
return Error(E);
Elts.push_back(APValue(ElemInt));
}
return Success(Elts, E);
}
// HLSL Change Ends.
case CK_HLSLVectorSplat: // HLSL Change
case CK_VectorSplat: {
APValue Val = APValue();
if (SETy->isIntegerType()) {
APSInt IntResult;
if (!EvaluateInteger(SE, IntResult, Info))
return false;
Val = APValue(IntResult);
} else if (SETy->isRealFloatingType()) {
APFloat F(0.0);
if (!EvaluateFloat(SE, F, Info))
return false;
Val = APValue(F);
} else {
return Error(E);
}
// Splat and create vector APValue.
SmallVector<APValue, 4> Elts(NElts, Val);
return Success(Elts, E);
}
case CK_BitCast: {
// Evaluate the operand into an APInt we can extract from.
llvm::APInt SValInt;
if (!EvalAndBitcastToAPInt(Info, SE, SValInt))
return false;
// Extract the elements
QualType EltTy = VTy->getElementType();
unsigned EltSize = Info.Ctx.getTypeSize(EltTy);
bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian();
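// Element i of a little-endian vector occupies bits
// [i*EltSize, (i+1)*EltSize), so rotate those bits down to bit 0 and
// truncate; on big-endian targets the elements are laid out from the most
// significant end, hence the rotate-left instead.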
SmallVector<APValue, 4> Elts;
if (EltTy->isRealFloatingType()) {
const llvm::fltSemantics &Sem = Info.Ctx.getFloatTypeSemantics(EltTy);
unsigned FloatEltSize = EltSize;
if (&Sem == &APFloat::x87DoubleExtended)
FloatEltSize = 80;
for (unsigned i = 0; i < NElts; i++) {
llvm::APInt Elt;
if (BigEndian)
Elt = SValInt.rotl(i*EltSize+FloatEltSize).trunc(FloatEltSize);
else
Elt = SValInt.rotr(i*EltSize).trunc(FloatEltSize);
Elts.push_back(APValue(APFloat(Sem, Elt)));
}
} else if (EltTy->isIntegerType()) {
for (unsigned i = 0; i < NElts; i++) {
llvm::APInt Elt;
if (BigEndian)
Elt = SValInt.rotl(i*EltSize+EltSize).zextOrTrunc(EltSize);
else
Elt = SValInt.rotr(i*EltSize).zextOrTrunc(EltSize);
Elts.push_back(APValue(APSInt(Elt, EltTy->isSignedIntegerType())));
}
} else {
return Error(E);
}
return Success(Elts, E);
}
default:
return ExprEvaluatorBaseTy::VisitCastExpr(E);
}
}
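// Evaluate a vector initializer list. For HLSL the list must supply exactly
// one initializer per element; otherwise (e.g., OpenCL-style nested vector
// initializers) nested vectors are flattened into the result and missing
// trailing elements are zero-initialized for GCC compatibility.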
bool
VectorExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
// HLSL Change Begins.
if (Info.getLangOpts().HLSL) {
QualType Ty = E->getType();
if (!hlsl::IsHLSLVecType(Ty))
return Error(E); // HLSL Change
unsigned vecSize = hlsl::GetHLSLVecSize(Ty);
// TODO: support all cases.
if (E->getNumInits() != vecSize)
return Error(E); // HLSL Change
}
// HLSL Change Ends.
const VectorType *VT = nullptr;
// HLSL Change Begins.
if (hlsl::IsHLSLVecType(E->getType())) {
VT = hlsl::ConvertHLSLVecMatTypeToExtVectorType(getEvalInfo().Ctx,
E->getType());
} else
VT = E->getType()->castAs<VectorType>();
// HLSL Change Ends.
unsigned NumInits = E->getNumInits();
unsigned NumElements = VT->getNumElements();
QualType EltTy = VT->getElementType();
SmallVector<APValue, 4> Elements;
// The number of initializers can be less than the number of
// vector elements. For OpenCL, this can be due to nested vector
// initialization. For GCC compatibility, missing trailing elements
// should be initialized with zeroes.
unsigned CountInits = 0, CountElts = 0;
while (CountElts < NumElements) {
// Handle nested vector initialization.
if (CountInits < NumInits
&& (E->getInit(CountInits)->getType()->isVectorType() ||
// HLSL Change Begins.
(Info.getLangOpts().HLSL &&
hlsl::IsHLSLVecType(E->getInit(CountInits)->getType())))) {
// HLSL Change Ends.
APValue v;
if (!EvaluateVector(E->getInit(CountInits), v, Info))
return Error(E);
unsigned vlen = v.getVectorLength();
for (unsigned j = 0; j < vlen; j++)
Elements.push_back(v.getVectorElt(j));
CountElts += vlen;
} else if (EltTy->isIntegerType()) {
llvm::APSInt sInt(32);
if (CountInits < NumInits) {
if (!EvaluateInteger(E->getInit(CountInits), sInt, Info))
return false;
} else // trailing integer zero.
sInt = Info.Ctx.MakeIntValue(0, EltTy);
Elements.push_back(APValue(sInt));
CountElts++;
} else {
llvm::APFloat f(0.0);
if (CountInits < NumInits) {
if (!EvaluateFloat(E->getInit(CountInits), f, Info))
return false;
} else // trailing float zero.
f = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(EltTy));
Elements.push_back(APValue(f));
CountElts++;
}
CountInits++;
}
return Success(Elements, E);
}
bool
VectorExprEvaluator::ZeroInitialization(const Expr *E) {
const VectorType *VT = E->getType()->getAs<VectorType>();
QualType EltTy = VT->getElementType();
APValue ZeroElement;
if (EltTy->isIntegerType())
ZeroElement = APValue(Info.Ctx.MakeIntValue(0, EltTy));
else
ZeroElement =
APValue(APFloat::getZero(Info.Ctx.getFloatTypeSemantics(EltTy)));
SmallVector<APValue, 4> Elements(VT->getNumElements(), ZeroElement);
return Success(Elements, E);
}
bool VectorExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
VisitIgnoredValue(E->getSubExpr());
return ZeroInitialization(E);
}
//===----------------------------------------------------------------------===//
// Array Evaluation
//===----------------------------------------------------------------------===//
namespace {
class ArrayExprEvaluator
: public ExprEvaluatorBase<ArrayExprEvaluator> {
const LValue &This;
APValue &Result;
public:
ArrayExprEvaluator(EvalInfo &Info, const LValue &This, APValue &Result)
: ExprEvaluatorBaseTy(Info), This(This), Result(Result) {}
bool Success(const APValue &V, const Expr *E) {
assert((V.isArray() || V.isLValue()) &&
"expected array or string literal");
Result = V;
return true;
}
bool ZeroInitialization(const Expr *E) {
const ConstantArrayType *CAT =
Info.Ctx.getAsConstantArrayType(E->getType());
if (!CAT)
return Error(E);
Result = APValue(APValue::UninitArray(), 0,
CAT->getSize().getZExtValue());
if (!Result.hasArrayFiller()) return true;
// Zero-initialize all elements.
LValue Subobject = This;
Subobject.addArray(Info, E, CAT);
ImplicitValueInitExpr VIE(CAT->getElementType());
return EvaluateInPlace(Result.getArrayFiller(), Info, Subobject, &VIE);
}
bool VisitInitListExpr(const InitListExpr *E);
bool VisitCXXConstructExpr(const CXXConstructExpr *E);
bool VisitCXXConstructExpr(const CXXConstructExpr *E,
const LValue &Subobject,
APValue *Value, QualType Type);
};
} // end anonymous namespace
static bool EvaluateArray(const Expr *E, const LValue &This,
APValue &Result, EvalInfo &Info) {
assert(E->isRValue() && E->getType()->isArrayType() && "not an array rvalue");
return ArrayExprEvaluator(Info, This, Result).Visit(E);
}
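// Evaluate an array initializer list. A braced string literal initializes
// the whole array directly; otherwise each listed element is evaluated in
// place, and a single "filler" value covers the remaining elements when the
// filler cannot depend on the array index.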
bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
if (Info.getLangOpts().HLSL) return Error(E); // HLSL Change
const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(E->getType());
if (!CAT)
return Error(E);
// C++11 [dcl.init.string]p1: A char array [...] can be initialized by [...]
// an appropriately-typed string literal enclosed in braces.
if (E->isStringLiteralInit()) {
LValue LV;
if (!EvaluateLValue(E->getInit(0), LV, Info))
return false;
APValue Val;
LV.moveInto(Val);
return Success(Val, E);
}
bool Success = true;
assert((!Result.isArray() || Result.getArrayInitializedElts() == 0) &&
"zero-initialized array shouldn't have any initialized elts");
APValue Filler;
if (Result.isArray() && Result.hasArrayFiller())
Filler = Result.getArrayFiller();
unsigned NumEltsToInit = E->getNumInits();
unsigned NumElts = CAT->getSize().getZExtValue();
const Expr *FillerExpr = E->hasArrayFiller() ? E->getArrayFiller() : nullptr;
// If the initializer might depend on the array index, run it for each
// array element. For now, just whitelist non-class value-initialization.
if (NumEltsToInit != NumElts && !isa<ImplicitValueInitExpr>(FillerExpr))
NumEltsToInit = NumElts;
Result = APValue(APValue::UninitArray(), NumEltsToInit, NumElts);
// If the array was previously zero-initialized, preserve the
// zero-initialized values.
if (!Filler.isUninit()) {
for (unsigned I = 0, E = Result.getArrayInitializedElts(); I != E; ++I)
Result.getArrayInitializedElt(I) = Filler;
if (Result.hasArrayFiller())
Result.getArrayFiller() = Filler;
}
LValue Subobject = This;
Subobject.addArray(Info, E, CAT);
for (unsigned Index = 0; Index != NumEltsToInit; ++Index) {
const Expr *Init =
Index < E->getNumInits() ? E->getInit(Index) : FillerExpr;
if (!EvaluateInPlace(Result.getArrayInitializedElt(Index),
Info, Subobject, Init) ||
!HandleLValueArrayAdjustment(Info, Init, Subobject,
CAT->getElementType(), 1)) {
if (!Info.keepEvaluatingAfterFailure())
return false;
Success = false;
}
}
if (!Result.hasArrayFiller())
return Success;
// If we get here, we have a trivial filler, which we can just evaluate
// once and splat over the rest of the array elements.
assert(FillerExpr && "no array filler for incomplete init list");
return EvaluateInPlace(Result.getArrayFiller(), Info, Subobject,
FillerExpr) && Success;
}
bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E) {
return VisitCXXConstructExpr(E, This, &Result, E->getType());
}
bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
const LValue &Subobject,
APValue *Value,
QualType Type) {
bool HadZeroInit = !Value->isUninit();
if (const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(Type)) {
unsigned N = CAT->getSize().getZExtValue();
// Preserve the array filler if we had prior zero-initialization.
APValue Filler =
HadZeroInit && Value->hasArrayFiller() ? Value->getArrayFiller()
: APValue();
*Value = APValue(APValue::UninitArray(), N, N);
if (HadZeroInit)
for (unsigned I = 0; I != N; ++I)
Value->getArrayInitializedElt(I) = Filler;
// Initialize the elements.
LValue ArrayElt = Subobject;
ArrayElt.addArray(Info, E, CAT);
for (unsigned I = 0; I != N; ++I)
if (!VisitCXXConstructExpr(E, ArrayElt, &Value->getArrayInitializedElt(I),
CAT->getElementType()) ||
!HandleLValueArrayAdjustment(Info, E, ArrayElt,
CAT->getElementType(), 1))
return false;
return true;
}
if (!Type->isRecordType())
return Error(E);
const CXXConstructorDecl *FD = E->getConstructor();
bool ZeroInit = E->requiresZeroInitialization();
if (CheckTrivialDefaultConstructor(Info, E->getExprLoc(), FD, ZeroInit)) {
if (HadZeroInit)
return true;
// See RecordExprEvaluator::VisitCXXConstructExpr for explanation.
ImplicitValueInitExpr VIE(Type);
return EvaluateInPlace(*Value, Info, Subobject, &VIE);
}
const FunctionDecl *Definition = nullptr;
FD->getBody(Definition);
if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition))
return false;
if (ZeroInit && !HadZeroInit) {
ImplicitValueInitExpr VIE(Type);
if (!EvaluateInPlace(*Value, Info, Subobject, &VIE))
return false;
}
auto Args = llvm::makeArrayRef(E->getArgs(), E->getNumArgs());
return HandleConstructorCall(E->getExprLoc(), Subobject, Args,
cast<CXXConstructorDecl>(Definition),
Info, *Value);
}
//===----------------------------------------------------------------------===//
// Integer Evaluation
//
// As a GNU extension, we support casting pointers to sufficiently-wide integer
// types and back in constant folding. Integer values are thus represented
// either as an integer-valued APValue, or as an lvalue-valued APValue.
//===----------------------------------------------------------------------===//
namespace {
class IntExprEvaluator
: public ExprEvaluatorBase<IntExprEvaluator> {
APValue &Result;
public:
IntExprEvaluator(EvalInfo &info, APValue &result)
: ExprEvaluatorBaseTy(info), Result(result) {}
bool Success(const llvm::APSInt &SI, const Expr *E, APValue &Result) {
assert(E->getType()->isIntegralOrEnumerationType() &&
"Invalid evaluation result.");
assert(SI.isSigned() == E->getType()->isSignedIntegerOrEnumerationType() &&
"Invalid evaluation result.");
assert(SI.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) &&
"Invalid evaluation result.");
Result = APValue(SI);
return true;
}
bool Success(const llvm::APSInt &SI, const Expr *E) {
return Success(SI, E, Result);
}
bool Success(const llvm::APInt &I, const Expr *E, APValue &Result) {
assert(E->getType()->isIntegralOrEnumerationType() &&
"Invalid evaluation result.");
assert(I.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) &&
"Invalid evaluation result.");
Result = APValue(APSInt(I));
Result.getInt().setIsUnsigned(
E->getType()->isUnsignedIntegerOrEnumerationType());
return true;
}
bool Success(const llvm::APInt &I, const Expr *E) {
return Success(I, E, Result);
}
bool Success(uint64_t Value, const Expr *E, APValue &Result) {
assert(E->getType()->isIntegralOrEnumerationType() &&
"Invalid evaluation result.");
Result = APValue(Info.Ctx.MakeIntValue(Value, E->getType()));
return true;
}
bool Success(uint64_t Value, const Expr *E) {
return Success(Value, E, Result);
}
bool Success(CharUnits Size, const Expr *E) {
return Success(Size.getQuantity(), E);
}
bool Success(const APValue &V, const Expr *E) {
if (V.isLValue() || V.isAddrLabelDiff()) {
Result = V;
return true;
}
return Success(V.getInt(), E);
}
bool ZeroInitialization(const Expr *E) { return Success(0, E); }
//===--------------------------------------------------------------------===//
// Visitor Methods
//===--------------------------------------------------------------------===//
bool VisitIntegerLiteral(const IntegerLiteral *E) {
return Success(E->getValue(), E);
}
bool VisitCharacterLiteral(const CharacterLiteral *E) {
return Success(E->getValue(), E);
}
bool CheckReferencedDecl(const Expr *E, const Decl *D);
bool VisitDeclRefExpr(const DeclRefExpr *E) {
if (CheckReferencedDecl(E, E->getDecl()))
return true;
return ExprEvaluatorBaseTy::VisitDeclRefExpr(E);
}
bool VisitMemberExpr(const MemberExpr *E) {
if (CheckReferencedDecl(E, E->getMemberDecl())) {
VisitIgnoredValue(E->getBase());
return true;
}
return ExprEvaluatorBaseTy::VisitMemberExpr(E);
}
bool VisitCallExpr(const CallExpr *E);
bool VisitBinaryOperator(const BinaryOperator *E);
bool VisitOffsetOfExpr(const OffsetOfExpr *E);
bool VisitUnaryOperator(const UnaryOperator *E);
bool VisitCastExpr(const CastExpr* E);
bool VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
bool VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
return Success(E->getValue(), E);
}
bool VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
return Success(E->getValue(), E);
}
// Note, GNU defines __null as an integer, not a pointer.
bool VisitGNUNullExpr(const GNUNullExpr *E) {
return ZeroInitialization(E);
}
bool VisitTypeTraitExpr(const TypeTraitExpr *E) {
return Success(E->getValue(), E);
}
bool VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
return Success(E->getValue(), E);
}
bool VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
return Success(E->getValue(), E);
}
bool VisitUnaryReal(const UnaryOperator *E);
bool VisitUnaryImag(const UnaryOperator *E);
bool VisitCXXNoexceptExpr(const CXXNoexceptExpr *E);
bool VisitSizeOfPackExpr(const SizeOfPackExpr *E);
private:
static QualType GetObjectType(APValue::LValueBase B);
bool TryEvaluateBuiltinObjectSize(const CallExpr *E);
// FIXME: Missing: array subscript of vector, member of vector
};
} // end anonymous namespace
/// EvaluateIntegerOrLValue - Evaluate an rvalue integral-typed expression, and
/// produce either the integer value or a pointer.
///
/// GCC has a heinous extension which folds casts between pointer types and
/// pointer-sized integral types. We support this by allowing the evaluation of
/// an integer rvalue to produce a pointer (represented as an lvalue) instead.
/// Some simple arithmetic on such values is supported (they are treated much
/// like char*).
static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result,
EvalInfo &Info) {
assert(E->isRValue() && E->getType()->isIntegralOrEnumerationType());
return IntExprEvaluator(Info, Result).Visit(E);
}
static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info) {
APValue Val;
if (!EvaluateIntegerOrLValue(E, Val, Info))
return false;
if (!Val.isInt()) {
// FIXME: It would be better to produce the diagnostic for casting
// a pointer to an integer.
Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
return false;
}
Result = Val.getInt();
return true;
}
/// Check whether the given declaration can be directly converted to an integral
/// rvalue. If not, no diagnostic is produced; there are other things we can
/// try.
bool IntExprEvaluator::CheckReferencedDecl(const Expr* E, const Decl* D) {
// Enums are integer constant exprs.
if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(D)) {
// Check for signedness/width mismatches between E type and ECD value.
bool SameSign = (ECD->getInitVal().isSigned()
== E->getType()->isSignedIntegerOrEnumerationType());
bool SameWidth = (ECD->getInitVal().getBitWidth()
== Info.Ctx.getIntWidth(E->getType()));
if (SameSign && SameWidth)
return Success(ECD->getInitVal(), E);
else {
// Get rid of mismatch (otherwise Success assertions will fail)
// by computing a new value matching the type of E.
llvm::APSInt Val = ECD->getInitVal();
if (!SameSign)
Val.setIsSigned(!ECD->getInitVal().isSigned());
if (!SameWidth)
Val = Val.extOrTrunc(Info.Ctx.getIntWidth(E->getType()));
return Success(Val, E);
}
}
return false;
}
/// EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way
/// as GCC.
static int EvaluateBuiltinClassifyType(const CallExpr *E) {
// The following enum mimics the values returned by GCC.
// FIXME: Does GCC differ between lvalue and rvalue references here?
enum gcc_type_class {
no_type_class = -1,
void_type_class, integer_type_class, char_type_class,
enumeral_type_class, boolean_type_class,
pointer_type_class, reference_type_class, offset_type_class,
real_type_class, complex_type_class,
function_type_class, method_type_class,
record_type_class, union_type_class,
array_type_class, string_type_class,
lang_type_class
};
// If no argument was supplied, default to "no_type_class". This isn't
// ideal; however, it is what gcc does.
if (E->getNumArgs() == 0)
return no_type_class;
QualType ArgTy = E->getArg(0)->getType();
if (ArgTy->isVoidType())
return void_type_class;
else if (ArgTy->isEnumeralType())
return enumeral_type_class;
else if (ArgTy->isBooleanType())
return boolean_type_class;
else if (ArgTy->isCharType())
return string_type_class; // gcc doesn't appear to use char_type_class
else if (ArgTy->isIntegerType())
return integer_type_class;
else if (ArgTy->isPointerType())
return pointer_type_class;
else if (ArgTy->isReferenceType())
return reference_type_class;
else if (ArgTy->isRealType())
return real_type_class;
else if (ArgTy->isComplexType())
return complex_type_class;
else if (ArgTy->isFunctionType())
return function_type_class;
else if (ArgTy->isStructureOrClassType())
return record_type_class;
else if (ArgTy->isUnionType())
return union_type_class;
else if (ArgTy->isArrayType())
return array_type_class;
else // FIXME: offset_type_class, method_type_class, & lang_type_class?
llvm_unreachable("CallExpr::isBuiltinClassifyType(): unimplemented type");
}
/// EvaluateBuiltinConstantPForLValue - Determine the result of
/// __builtin_constant_p when applied to the given lvalue.
///
/// An lvalue is only "constant" if it is a pointer or reference to the first
/// character of a string literal.
template<typename LValue>
static bool EvaluateBuiltinConstantPForLValue(const LValue &LV) {
const Expr *E = LV.getLValueBase().template dyn_cast<const Expr*>();
return E && isa<StringLiteral>(E) && LV.getLValueOffset().isZero();
}
/// EvaluateBuiltinConstantP - Evaluate __builtin_constant_p as similarly to
/// GCC as we can manage.
static bool EvaluateBuiltinConstantP(ASTContext &Ctx, const Expr *Arg) {
QualType ArgType = Arg->getType();
// __builtin_constant_p always has one operand. The rules which gcc follows
// are not precisely documented, but are as follows:
//
// - If the operand is of integral, floating, complex or enumeration type,
// and can be folded to a known value of that type, it returns 1.
// - If the operand can be folded to a pointer to the first character
// of a string literal (or such a pointer cast to an integral type), it
// returns 1.
//
// Otherwise, it returns 0.
//
// FIXME: GCC also intends to return 1 for literals of aggregate types, but
// its support for this does not currently work.
if (ArgType->isIntegralOrEnumerationType()) {
Expr::EvalResult Result;
if (!Arg->EvaluateAsRValue(Result, Ctx) || Result.HasSideEffects)
return false;
APValue &V = Result.Val;
if (V.getKind() == APValue::Int)
return true;
return EvaluateBuiltinConstantPForLValue(V);
} else if (ArgType->isFloatingType() || ArgType->isAnyComplexType()) {
return Arg->isEvaluatable(Ctx);
} else if (ArgType->isPointerType() || Arg->isGLValue()) {
LValue LV;
Expr::EvalStatus Status;
EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantFold);
if ((Arg->isGLValue() ? EvaluateLValue(Arg, LV, Info)
: EvaluatePointer(Arg, LV, Info)) &&
!Status.HasSideEffects)
return EvaluateBuiltinConstantPForLValue(LV);
}
// Anything else isn't considered to be sufficiently constant.
return false;
}
/// Retrieves the "underlying object type" of the given expression,
/// as used by __builtin_object_size.
QualType IntExprEvaluator::GetObjectType(APValue::LValueBase B) {
if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
if (const VarDecl *VD = dyn_cast<VarDecl>(D))
return VD->getType();
} else if (const Expr *E = B.get<const Expr*>()) {
if (isa<CompoundLiteralExpr>(E))
return E->getType();
}
return QualType();
}
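// Try to fold __builtin_object_size by evaluating the pointer argument and
// measuring the remaining space in its complete object: for a known base,
// the result is sizeof(object) minus the pointer's offset (e.g., with
// 'char buf[16]', __builtin_object_size(buf + 4, 0) folds to 12).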
bool IntExprEvaluator::TryEvaluateBuiltinObjectSize(const CallExpr *E) {
LValue Base;
{
// The operand of __builtin_object_size is never evaluated for side-effects.
// If there are any, but we can determine the pointed-to object anyway, then
// ignore the side-effects.
SpeculativeEvaluationRAII SpeculativeEval(Info);
if (!EvaluatePointer(E->getArg(0), Base, Info))
return false;
}
if (!Base.getLValueBase()) {
// It is not possible to determine which object the pointer points to at
// compile time; __builtin_object_size should return (size_t) -1 for type
// 0 or 1 and (size_t) 0 for type 2 or 3.
llvm::APSInt TypeIntValue;
const Expr *ExprType = E->getArg(1);
if (!ExprType->EvaluateAsInt(TypeIntValue, Info.Ctx))
return false;
if (TypeIntValue == 0 || TypeIntValue == 1)
return Success(-1, E);
if (TypeIntValue == 2 || TypeIntValue == 3)
return Success(0, E);
return Error(E);
}
}
QualType T = GetObjectType(Base.getLValueBase());
if (T.isNull() ||
T->isIncompleteType() ||
T->isFunctionType() ||
T->isVariablyModifiedType() ||
T->isDependentType())
return Error(E);
CharUnits Size = Info.Ctx.getTypeSizeInChars(T);
CharUnits Offset = Base.getLValueOffset();
if (!Offset.isNegative() && Offset <= Size)
Size -= Offset;
else
Size = CharUnits::Zero();
return Success(Size, E);
}
bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
switch (unsigned BuiltinOp = E->getBuiltinCallee()) {
default:
return ExprEvaluatorBaseTy::VisitCallExpr(E);
case Builtin::BI__builtin_object_size: {
if (TryEvaluateBuiltinObjectSize(E))
return true;
// If evaluating the argument has side-effects, we can't determine the size
// of the object, and so we lower it to unknown now. CodeGen relies on us to
// handle all cases where the expression has side-effects.
if (E->getArg(0)->HasSideEffects(Info.Ctx)) {
if (E->getArg(1)->EvaluateKnownConstInt(Info.Ctx).getZExtValue() <= 1)
return Success(-1ULL, E);
return Success(0, E);
}
// Expression had no side effects, but we couldn't statically determine the
// size of the referenced object.
switch (Info.EvalMode) {
case EvalInfo::EM_ConstantExpression:
case EvalInfo::EM_PotentialConstantExpression:
case EvalInfo::EM_ConstantFold:
case EvalInfo::EM_EvaluateForOverflow:
case EvalInfo::EM_IgnoreSideEffects:
return Error(E);
case EvalInfo::EM_ConstantExpressionUnevaluated:
case EvalInfo::EM_PotentialConstantExpressionUnevaluated:
return Success(-1ULL, E);
}
llvm_unreachable("Invalid EvalMode!");
}
case Builtin::BI__builtin_bswap16:
case Builtin::BI__builtin_bswap32:
case Builtin::BI__builtin_bswap64: {
APSInt Val;
if (!EvaluateInteger(E->getArg(0), Val, Info))
return false;
return Success(Val.byteSwap(), E);
}
case Builtin::BI__builtin_classify_type:
return Success(EvaluateBuiltinClassifyType(E), E);
// FIXME: BI__builtin_clrsb
// FIXME: BI__builtin_clrsbl
// FIXME: BI__builtin_clrsbll
case Builtin::BI__builtin_clz:
case Builtin::BI__builtin_clzl:
case Builtin::BI__builtin_clzll:
case Builtin::BI__builtin_clzs: {
APSInt Val;
if (!EvaluateInteger(E->getArg(0), Val, Info))
return false;
if (!Val)
return Error(E);
return Success(Val.countLeadingZeros(), E);
}
case Builtin::BI__builtin_constant_p:
return Success(EvaluateBuiltinConstantP(Info.Ctx, E->getArg(0)), E);
case Builtin::BI__builtin_ctz:
case Builtin::BI__builtin_ctzl:
case Builtin::BI__builtin_ctzll:
case Builtin::BI__builtin_ctzs: {
APSInt Val;
if (!EvaluateInteger(E->getArg(0), Val, Info))
return false;
if (!Val)
return Error(E);
return Success(Val.countTrailingZeros(), E);
}
case Builtin::BI__builtin_eh_return_data_regno: {
int Operand = E->getArg(0)->EvaluateKnownConstInt(Info.Ctx).getZExtValue();
Operand = Info.Ctx.getTargetInfo().getEHDataRegisterNumber(Operand);
return Success(Operand, E);
}
case Builtin::BI__builtin_expect:
return Visit(E->getArg(0));
case Builtin::BI__builtin_ffs:
case Builtin::BI__builtin_ffsl:
case Builtin::BI__builtin_ffsll: {
APSInt Val;
if (!EvaluateInteger(E->getArg(0), Val, Info))
return false;
unsigned N = Val.countTrailingZeros();
return Success(N == Val.getBitWidth() ? 0 : N + 1, E);
}
case Builtin::BI__builtin_fpclassify: {
APFloat Val(0.0);
if (!EvaluateFloat(E->getArg(5), Val, Info))
return false;
unsigned Arg;
switch (Val.getCategory()) {
case APFloat::fcNaN: Arg = 0; break;
case APFloat::fcInfinity: Arg = 1; break;
case APFloat::fcNormal: Arg = Val.isDenormal() ? 3 : 2; break;
case APFloat::fcZero: Arg = 4; break;
}
return Visit(E->getArg(Arg));
}
case Builtin::BI__builtin_isinf_sign: {
APFloat Val(0.0);
return EvaluateFloat(E->getArg(0), Val, Info) &&
Success(Val.isInfinity() ? (Val.isNegative() ? -1 : 1) : 0, E);
}
case Builtin::BI__builtin_isinf: {
APFloat Val(0.0);
return EvaluateFloat(E->getArg(0), Val, Info) &&
Success(Val.isInfinity() ? 1 : 0, E);
}
case Builtin::BI__builtin_isfinite: {
APFloat Val(0.0);
return EvaluateFloat(E->getArg(0), Val, Info) &&
Success(Val.isFinite() ? 1 : 0, E);
}
case Builtin::BI__builtin_isnan: {
APFloat Val(0.0);
return EvaluateFloat(E->getArg(0), Val, Info) &&
Success(Val.isNaN() ? 1 : 0, E);
}
case Builtin::BI__builtin_isnormal: {
APFloat Val(0.0);
return EvaluateFloat(E->getArg(0), Val, Info) &&
Success(Val.isNormal() ? 1 : 0, E);
}
case Builtin::BI__builtin_parity:
case Builtin::BI__builtin_parityl:
case Builtin::BI__builtin_parityll: {
APSInt Val;
if (!EvaluateInteger(E->getArg(0), Val, Info))
return false;
return Success(Val.countPopulation() % 2, E);
}
case Builtin::BI__builtin_popcount:
case Builtin::BI__builtin_popcountl:
case Builtin::BI__builtin_popcountll: {
APSInt Val;
if (!EvaluateInteger(E->getArg(0), Val, Info))
return false;
return Success(Val.countPopulation(), E);
}
case Builtin::BIstrlen:
// A call to strlen is not a constant expression.
if (Info.getLangOpts().CPlusPlus11)
Info.CCEDiag(E, diag::note_constexpr_invalid_function)
<< /*isConstexpr*/0 << /*isConstructor*/0 << "'strlen'";
else
Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
LLVM_FALLTHROUGH; // HLSL Change.
case Builtin::BI__builtin_strlen: {
// As an extension, we support __builtin_strlen() as a constant expression,
// and support folding strlen() to a constant.
LValue String;
if (!EvaluatePointer(E->getArg(0), String, Info))
return false;
// Fast path: if it's a string literal, search the string value.
if (const StringLiteral *S = dyn_cast_or_null<StringLiteral>(
String.getLValueBase().dyn_cast<const Expr *>())) {
// The string literal may have embedded null characters. Find the first
// one and truncate there.
StringRef Str = S->getBytes();
int64_t Off = String.Offset.getQuantity();
if (Off >= 0 && (uint64_t)Off <= (uint64_t)Str.size() &&
S->getCharByteWidth() == 1) {
Str = Str.substr(Off);
StringRef::size_type Pos = Str.find(0);
if (Pos != StringRef::npos)
Str = Str.substr(0, Pos);
return Success(Str.size(), E);
}
// Fall through to slow path to issue appropriate diagnostic.
}
// Slow path: scan the bytes of the string looking for the terminating 0.
QualType CharTy = E->getArg(0)->getType()->getPointeeType();
for (uint64_t Strlen = 0; /**/; ++Strlen) {
APValue Char;
if (!handleLValueToRValueConversion(Info, E, CharTy, String, Char) ||
!Char.isInt())
return false;
if (!Char.getInt())
return Success(Strlen, E);
if (!HandleLValueArrayAdjustment(Info, E, String, CharTy, 1))
return false;
}
}
case Builtin::BI__atomic_always_lock_free:
case Builtin::BI__atomic_is_lock_free:
case Builtin::BI__c11_atomic_is_lock_free: {
APSInt SizeVal;
if (!EvaluateInteger(E->getArg(0), SizeVal, Info))
return false;
// For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
// of two less than the maximum inline atomic width, we know it is
// lock-free. If the size isn't a power of two, or greater than the
// maximum alignment where we promote atomics, we know it is not lock-free
// (at least not in the sense of atomic_is_lock_free). Otherwise,
// the answer can only be determined at runtime; for example, 16-byte
// atomics have lock-free implementations on some, but not all,
// x86-64 processors.
// Check power-of-two.
CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
if (Size.isPowerOfTwo()) {
// Check against inlining width.
unsigned InlineWidthBits =
Info.Ctx.getTargetInfo().getMaxAtomicInlineWidth();
if (Size <= Info.Ctx.toCharUnitsFromBits(InlineWidthBits)) {
if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free ||
Size == CharUnits::One() ||
E->getArg(1)->isNullPointerConstant(Info.Ctx,
Expr::NPC_NeverValueDependent))
// OK, we will inline appropriately-aligned operations of this size,
// and _Atomic(T) is appropriately-aligned.
return Success(1, E);
QualType PointeeType = E->getArg(1)->IgnoreImpCasts()->getType()->
castAs<PointerType>()->getPointeeType();
if (!PointeeType->isIncompleteType() &&
Info.Ctx.getTypeAlignInChars(PointeeType) >= Size) {
// OK, we will inline operations on this object.
return Success(1, E);
}
}
}
return BuiltinOp == Builtin::BI__atomic_always_lock_free ?
Success(0, E) : Error(E);
}
}
}
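/// \brief Determine whether two lvalues are based on the same object.
/// Distinct declarations of one entity compare equal via their canonical
/// declaration; for non-global bases the call index must also match, since
/// each recursive call creates fresh temporaries.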
static bool HasSameBase(const LValue &A, const LValue &B) {
if (!A.getLValueBase())
return !B.getLValueBase();
if (!B.getLValueBase())
return false;
if (A.getLValueBase().getOpaqueValue() !=
B.getLValueBase().getOpaqueValue()) {
const Decl *ADecl = GetLValueBaseDecl(A);
if (!ADecl)
return false;
const Decl *BDecl = GetLValueBaseDecl(B);
if (!BDecl || ADecl->getCanonicalDecl() != BDecl->getCanonicalDecl())
return false;
}
return IsGlobalLValue(A.getLValueBase()) ||
A.getLValueCallIndex() == B.getLValueCallIndex();
}
/// \brief Determine whether this is a pointer past the end of the complete
/// object referred to by the lvalue.
static bool isOnePastTheEndOfCompleteObject(const ASTContext &Ctx,
const LValue &LV) {
// A null pointer can be viewed as being "past the end" but we don't
// choose to look at it that way here.
if (!LV.getLValueBase())
return false;
// If the designator is valid and refers to a subobject, we're not pointing
// past the end.
if (!LV.getLValueDesignator().Invalid &&
!LV.getLValueDesignator().isOnePastTheEnd())
return false;
// We're a past-the-end pointer if we point to the byte after the object,
// no matter what our type or path is.
auto Size = Ctx.getTypeSizeInChars(getType(LV.getLValueBase()));
return LV.getLValueOffset() == Size;
}
namespace {
/// \brief Data recursive integer evaluator of certain binary operators.
///
/// We use a data recursive algorithm for binary operators so that we are able
/// to handle extreme cases of chained binary operators without causing stack
/// overflow.
class DataRecursiveIntBinOpEvaluator {
struct EvalResult {
APValue Val;
bool Failed;
EvalResult() : Failed(false) { }
void swap(EvalResult &RHS) {
Val.swap(RHS.Val);
Failed = RHS.Failed;
RHS.Failed = false;
}
};
struct Job {
const Expr *E;
EvalResult LHSResult; // meaningful only for binary operator expression.
enum { AnyExprKind, BinOpKind, BinOpVisitedLHSKind } Kind;
Job() : StoredInfo(nullptr) {}
void startSpeculativeEval(EvalInfo &Info) {
OldEvalStatus = Info.EvalStatus;
Info.EvalStatus.Diag = nullptr;
StoredInfo = &Info;
}
~Job() {
if (StoredInfo) {
StoredInfo->EvalStatus = OldEvalStatus;
}
}
private:
EvalInfo *StoredInfo; // non-null if status changed.
Expr::EvalStatus OldEvalStatus;
};
SmallVector<Job, 16> Queue;
IntExprEvaluator &IntEval;
EvalInfo &Info;
APValue &FinalResult;
public:
DataRecursiveIntBinOpEvaluator(IntExprEvaluator &IntEval, APValue &Result)
: IntEval(IntEval), Info(IntEval.getEvalInfo()), FinalResult(Result) { }
/// \brief True if \param E is a binary operator that we are going to handle
/// data recursively.
/// We handle binary operators that are comma, logical, or that have operands
/// with integral or enumeration type.
static bool shouldEnqueue(const BinaryOperator *E) {
return E->getOpcode() == BO_Comma ||
E->isLogicalOp() ||
(E->getLHS()->getType()->isIntegralOrEnumerationType() &&
E->getRHS()->getType()->isIntegralOrEnumerationType());
}
bool Traverse(const BinaryOperator *E) {
enqueue(E);
EvalResult PrevResult;
while (!Queue.empty())
process(PrevResult);
if (PrevResult.Failed) return false;
FinalResult.swap(PrevResult.Val);
return true;
}
private:
bool Success(uint64_t Value, const Expr *E, APValue &Result) {
return IntEval.Success(Value, E, Result);
}
bool Success(const APSInt &Value, const Expr *E, APValue &Result) {
return IntEval.Success(Value, E, Result);
}
bool Error(const Expr *E) {
return IntEval.Error(E);
}
bool Error(const Expr *E, diag::kind D) {
return IntEval.Error(E, D);
}
OptionalDiagnostic CCEDiag(const Expr *E, diag::kind D) {
return Info.CCEDiag(E, D);
}
// \brief Returns true if visiting the RHS is necessary, false otherwise.
bool VisitBinOpLHSOnly(EvalResult &LHSResult, const BinaryOperator *E,
bool &SuppressRHSDiags);
bool VisitBinOp(const EvalResult &LHSResult, const EvalResult &RHSResult,
const BinaryOperator *E, APValue &Result);
void EvaluateExpr(const Expr *E, EvalResult &Result) {
Result.Failed = !Evaluate(Result.Val, Info, E);
if (Result.Failed)
Result.Val = APValue();
}
void process(EvalResult &Result);
void enqueue(const Expr *E) {
E = E->IgnoreParens();
Queue.resize(Queue.size()+1);
Queue.back().E = E;
Queue.back().Kind = Job::AnyExprKind;
}
};
}
bool DataRecursiveIntBinOpEvaluator::
VisitBinOpLHSOnly(EvalResult &LHSResult, const BinaryOperator *E,
bool &SuppressRHSDiags) {
if (E->getOpcode() == BO_Comma) {
// Ignore LHS but note if we could not evaluate it.
if (LHSResult.Failed)
return Info.noteSideEffect();
return true;
}
if (E->isLogicalOp()) {
bool LHSAsBool;
if (!LHSResult.Failed && HandleConversionToBool(LHSResult.Val, LHSAsBool)) {
// We were able to evaluate the LHS, see if we can get away with not
// evaluating the RHS: 0 && X -> 0, 1 || X -> 1
if (LHSAsBool == (E->getOpcode() == BO_LOr)) {
Success(LHSAsBool, E, LHSResult.Val);
return false; // Ignore RHS
}
} else {
LHSResult.Failed = true;
// Since we weren't able to evaluate the left hand side, it
// must have had side effects.
if (!Info.noteSideEffect())
return false;
// We can't evaluate the LHS; however, sometimes the result
// is determined by the RHS: X && 0 -> 0, X || 1 -> 1.
// Don't ignore RHS and suppress diagnostics from this arm.
SuppressRHSDiags = true;
}
return true;
}
assert(E->getLHS()->getType()->isIntegralOrEnumerationType() &&
E->getRHS()->getType()->isIntegralOrEnumerationType());
if (LHSResult.Failed && !Info.keepEvaluatingAfterFailure())
return false; // Ignore RHS;
return true;
}
bool DataRecursiveIntBinOpEvaluator::
VisitBinOp(const EvalResult &LHSResult, const EvalResult &RHSResult,
const BinaryOperator *E, APValue &Result) {
if (E->getOpcode() == BO_Comma) {
if (RHSResult.Failed)
return false;
Result = RHSResult.Val;
return true;
}
if (E->isLogicalOp()) {
bool lhsResult, rhsResult;
bool LHSIsOK = HandleConversionToBool(LHSResult.Val, lhsResult);
bool RHSIsOK = HandleConversionToBool(RHSResult.Val, rhsResult);
if (LHSIsOK) {
if (RHSIsOK) {
if (E->getOpcode() == BO_LOr)
return Success(lhsResult || rhsResult, E, Result);
else
return Success(lhsResult && rhsResult, E, Result);
}
} else {
if (RHSIsOK) {
// We can't evaluate the LHS; however, sometimes the result
// is determined by the RHS: X && 0 -> 0, X || 1 -> 1.
if (rhsResult == (E->getOpcode() == BO_LOr))
return Success(rhsResult, E, Result);
}
}
return false;
}
assert(E->getLHS()->getType()->isIntegralOrEnumerationType() &&
E->getRHS()->getType()->isIntegralOrEnumerationType());
if (LHSResult.Failed || RHSResult.Failed)
return false;
const APValue &LHSVal = LHSResult.Val;
const APValue &RHSVal = RHSResult.Val;
// Handle cases like (unsigned long)&a + 4.
if (E->isAdditiveOp() && LHSVal.isLValue() && RHSVal.isInt()) {
Result = LHSVal;
CharUnits AdditionalOffset =
CharUnits::fromQuantity(RHSVal.getInt().getZExtValue());
if (E->getOpcode() == BO_Add)
Result.getLValueOffset() += AdditionalOffset;
else
Result.getLValueOffset() -= AdditionalOffset;
return true;
}
// Handle cases like 4 + (unsigned long)&a
if (E->getOpcode() == BO_Add &&
RHSVal.isLValue() && LHSVal.isInt()) {
Result = RHSVal;
Result.getLValueOffset() +=
CharUnits::fromQuantity(LHSVal.getInt().getZExtValue());
return true;
}
if (E->getOpcode() == BO_Sub && LHSVal.isLValue() && RHSVal.isLValue()) {
// Handle (intptr_t)&&A - (intptr_t)&&B.
if (!LHSVal.getLValueOffset().isZero() ||
!RHSVal.getLValueOffset().isZero())
return false;
const Expr *LHSExpr = LHSVal.getLValueBase().dyn_cast<const Expr*>();
const Expr *RHSExpr = RHSVal.getLValueBase().dyn_cast<const Expr*>();
if (!LHSExpr || !RHSExpr)
return false;
const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(LHSExpr);
const AddrLabelExpr *RHSAddrExpr = dyn_cast<AddrLabelExpr>(RHSExpr);
if (!LHSAddrExpr || !RHSAddrExpr)
return false;
// Make sure both labels come from the same function.
if (LHSAddrExpr->getLabel()->getDeclContext() !=
RHSAddrExpr->getLabel()->getDeclContext())
return false;
Result = APValue(LHSAddrExpr, RHSAddrExpr);
return true;
}
// All the remaining cases expect both operands to be an integer
if (!LHSVal.isInt() || !RHSVal.isInt())
return Error(E);
// Set up the width and signedness manually, in case it can't be deduced
// from the operation we're performing.
// FIXME: Don't do this in the cases where we can deduce it.
APSInt Value(Info.Ctx.getIntWidth(E->getType()),
E->getType()->isUnsignedIntegerOrEnumerationType());
if (!handleIntIntBinOp(Info, E, LHSVal.getInt(), E->getOpcode(),
RHSVal.getInt(), Value))
return false;
return Success(Value, E, Result);
}
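// Pump one step of the work queue: an AnyExprKind job either evaluates its
// expression directly or becomes a BinOpKind job with its LHS enqueued; a
// BinOpKind job stores the LHS result and enqueues the RHS; a
// BinOpVisitedLHSKind job combines both results and pops itself.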
void DataRecursiveIntBinOpEvaluator::process(EvalResult &Result) {
Job &job = Queue.back();
switch (job.Kind) {
case Job::AnyExprKind: {
if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(job.E)) {
if (shouldEnqueue(Bop)) {
job.Kind = Job::BinOpKind;
enqueue(Bop->getLHS());
return;
}
}
EvaluateExpr(job.E, Result);
Queue.pop_back();
return;
}
case Job::BinOpKind: {
const BinaryOperator *Bop = cast<BinaryOperator>(job.E);
bool SuppressRHSDiags = false;
if (!VisitBinOpLHSOnly(Result, Bop, SuppressRHSDiags)) {
Queue.pop_back();
return;
}
if (SuppressRHSDiags)
job.startSpeculativeEval(Info);
job.LHSResult.swap(Result);
job.Kind = Job::BinOpVisitedLHSKind;
enqueue(Bop->getRHS());
return;
}
case Job::BinOpVisitedLHSKind: {
const BinaryOperator *Bop = cast<BinaryOperator>(job.E);
EvalResult RHS;
RHS.swap(Result);
Result.Failed = !VisitBinOp(job.LHSResult, RHS, Bop, Result.Val);
Queue.pop_back();
return;
}
}
llvm_unreachable("Invalid Job::Kind!");
}
bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
if (!Info.keepEvaluatingAfterFailure() && E->isAssignmentOp())
return Error(E);
if (DataRecursiveIntBinOpEvaluator::shouldEnqueue(E))
return DataRecursiveIntBinOpEvaluator(*this, Result).Traverse(E);
QualType LHSTy = E->getLHS()->getType();
QualType RHSTy = E->getRHS()->getType();
if (LHSTy->isAnyComplexType() || RHSTy->isAnyComplexType()) {
ComplexValue LHS, RHS;
bool LHSOK;
if (E->isAssignmentOp()) {
LValue LV;
EvaluateLValue(E->getLHS(), LV, Info);
LHSOK = false;
} else if (LHSTy->isRealFloatingType()) {
LHSOK = EvaluateFloat(E->getLHS(), LHS.FloatReal, Info);
if (LHSOK) {
LHS.makeComplexFloat();
LHS.FloatImag = APFloat(LHS.FloatReal.getSemantics());
}
} else {
LHSOK = EvaluateComplex(E->getLHS(), LHS, Info);
}
if (!LHSOK && !Info.keepEvaluatingAfterFailure())
return false;
if (E->getRHS()->getType()->isRealFloatingType()) {
if (!EvaluateFloat(E->getRHS(), RHS.FloatReal, Info) || !LHSOK)
return false;
RHS.makeComplexFloat();
RHS.FloatImag = APFloat(RHS.FloatReal.getSemantics());
} else if (!EvaluateComplex(E->getRHS(), RHS, Info) || !LHSOK)
return false;
if (LHS.isComplexFloat()) {
APFloat::cmpResult CR_r =
LHS.getComplexFloatReal().compare(RHS.getComplexFloatReal());
APFloat::cmpResult CR_i =
LHS.getComplexFloatImag().compare(RHS.getComplexFloatImag());
if (E->getOpcode() == BO_EQ)
return Success((CR_r == APFloat::cmpEqual &&
CR_i == APFloat::cmpEqual), E);
else {
assert(E->getOpcode() == BO_NE &&
"Invalid complex comparison.");
return Success(((CR_r == APFloat::cmpGreaterThan ||
CR_r == APFloat::cmpLessThan ||
CR_r == APFloat::cmpUnordered) ||
(CR_i == APFloat::cmpGreaterThan ||
CR_i == APFloat::cmpLessThan ||
CR_i == APFloat::cmpUnordered)), E);
}
} else {
if (E->getOpcode() == BO_EQ)
return Success((LHS.getComplexIntReal() == RHS.getComplexIntReal() &&
LHS.getComplexIntImag() == RHS.getComplexIntImag()), E);
else {
assert(E->getOpcode() == BO_NE &&
"Invalid compex comparison.");
return Success((LHS.getComplexIntReal() != RHS.getComplexIntReal() ||
LHS.getComplexIntImag() != RHS.getComplexIntImag()), E);
}
}
}
if (LHSTy->isRealFloatingType() &&
RHSTy->isRealFloatingType()) {
APFloat RHS(0.0), LHS(0.0);
bool LHSOK = EvaluateFloat(E->getRHS(), RHS, Info);
if (!LHSOK && !Info.keepEvaluatingAfterFailure())
return false;
if (!EvaluateFloat(E->getLHS(), LHS, Info) || !LHSOK)
return false;
APFloat::cmpResult CR = LHS.compare(RHS);
switch (E->getOpcode()) {
default:
llvm_unreachable("Invalid binary operator!");
case BO_LT:
return Success(CR == APFloat::cmpLessThan, E);
case BO_GT:
return Success(CR == APFloat::cmpGreaterThan, E);
case BO_LE:
return Success(CR == APFloat::cmpLessThan || CR == APFloat::cmpEqual, E);
case BO_GE:
return Success(CR == APFloat::cmpGreaterThan || CR == APFloat::cmpEqual,
E);
case BO_EQ:
return Success(CR == APFloat::cmpEqual, E);
case BO_NE:
return Success(CR == APFloat::cmpGreaterThan
|| CR == APFloat::cmpLessThan
|| CR == APFloat::cmpUnordered, E);
}
}
if (LHSTy->isPointerType() && RHSTy->isPointerType()) {
if (E->getOpcode() == BO_Sub || E->isComparisonOp()) {
LValue LHSValue, RHSValue;
bool LHSOK = EvaluatePointer(E->getLHS(), LHSValue, Info);
if (!LHSOK && !Info.keepEvaluatingAfterFailure())
return false;
if (!EvaluatePointer(E->getRHS(), RHSValue, Info) || !LHSOK)
return false;
// Reject differing bases from the normal codepath; we special-case
// comparisons to null.
if (!HasSameBase(LHSValue, RHSValue)) {
if (E->getOpcode() == BO_Sub) {
// Handle &&A - &&B.
if (!LHSValue.Offset.isZero() || !RHSValue.Offset.isZero())
return false;
const Expr *LHSExpr = LHSValue.Base.dyn_cast<const Expr*>();
const Expr *RHSExpr = RHSValue.Base.dyn_cast<const Expr*>();
if (!LHSExpr || !RHSExpr)
return false;
const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(LHSExpr);
const AddrLabelExpr *RHSAddrExpr = dyn_cast<AddrLabelExpr>(RHSExpr);
if (!LHSAddrExpr || !RHSAddrExpr)
return false;
// Make sure both labels come from the same function.
if (LHSAddrExpr->getLabel()->getDeclContext() !=
RHSAddrExpr->getLabel()->getDeclContext())
return false;
Result = APValue(LHSAddrExpr, RHSAddrExpr);
return true;
}
// Inequalities and subtractions between unrelated pointers have
// unspecified or undefined behavior.
if (!E->isEqualityOp())
return Error(E);
// A constant address may compare equal to the address of a symbol.
// The one exception is that address of an object cannot compare equal
// to a null pointer constant.
if ((!LHSValue.Base && !LHSValue.Offset.isZero()) ||
(!RHSValue.Base && !RHSValue.Offset.isZero()))
return Error(E);
// It's implementation-defined whether distinct literals will have
// distinct addresses. In clang, the result of such a comparison is
// unspecified, so it is not a constant expression. However, we do know
// that the address of a literal will be non-null.
if ((IsLiteralLValue(LHSValue) || IsLiteralLValue(RHSValue)) &&
LHSValue.Base && RHSValue.Base)
return Error(E);
// We can't tell whether weak symbols will end up pointing to the same
// object.
if (IsWeakLValue(LHSValue) || IsWeakLValue(RHSValue))
return Error(E);
// We can't compare the address of the start of one object with the
// past-the-end address of another object, per C++ DR1652.
if ((LHSValue.Base && LHSValue.Offset.isZero() &&
isOnePastTheEndOfCompleteObject(Info.Ctx, RHSValue)) ||
(RHSValue.Base && RHSValue.Offset.isZero() &&
isOnePastTheEndOfCompleteObject(Info.Ctx, LHSValue)))
return Error(E);
// We can't tell whether an object is at the same address as another
// zero sized object.
if ((RHSValue.Base && isZeroSized(LHSValue)) ||
(LHSValue.Base && isZeroSized(RHSValue)))
return Error(E);
// Pointers with different bases cannot represent the same object.
// (Note that clang defaults to -fmerge-all-constants, which can
// lead to inconsistent results for comparisons involving the address
// of a constant; this generally doesn't matter in practice.)
return Success(E->getOpcode() == BO_NE, E);
}
const CharUnits &LHSOffset = LHSValue.getLValueOffset();
const CharUnits &RHSOffset = RHSValue.getLValueOffset();
SubobjectDesignator &LHSDesignator = LHSValue.getLValueDesignator();
SubobjectDesignator &RHSDesignator = RHSValue.getLValueDesignator();
if (E->getOpcode() == BO_Sub) {
// C++11 [expr.add]p6:
// Unless both pointers point to elements of the same array object, or
// one past the last element of the array object, the behavior is
// undefined.
if (!LHSDesignator.Invalid && !RHSDesignator.Invalid &&
!AreElementsOfSameArray(getType(LHSValue.Base),
LHSDesignator, RHSDesignator))
CCEDiag(E, diag::note_constexpr_pointer_subtraction_not_same_array);
QualType Type = E->getLHS()->getType();
QualType ElementType = Type->getAs<PointerType>()->getPointeeType();
CharUnits ElementSize;
if (!HandleSizeof(Info, E->getExprLoc(), ElementType, ElementSize))
return false;
// As an extension, a type may have zero size (empty struct or union in
// C, array of zero length). Pointer subtraction in such cases has
// undefined behavior, so is not constant.
if (ElementSize.isZero()) {
Info.Diag(E, diag::note_constexpr_pointer_subtraction_zero_size)
<< ElementType;
return false;
}
// FIXME: LLVM and GCC both compute LHSOffset - RHSOffset at runtime,
// and produce incorrect results when it overflows. Such behavior
// appears to be non-conforming, but is common, so perhaps we should
// assume the standard intended for such cases to be undefined behavior
// and check for them.
// Compute (LHSOffset - RHSOffset) / Size carefully, checking for
// overflow in the final conversion to ptrdiff_t.
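      // A width of 65 bits holds the difference of any two signed 64-bit
      // offsets exactly, so the subtraction itself cannot overflow; only
      // the truncation to the result type's width below can, and that is
      // what we check for.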
APSInt LHS(
llvm::APInt(65, (int64_t)LHSOffset.getQuantity(), true), false);
APSInt RHS(
llvm::APInt(65, (int64_t)RHSOffset.getQuantity(), true), false);
APSInt ElemSize(
llvm::APInt(65, (int64_t)ElementSize.getQuantity(), true), false);
APSInt TrueResult = (LHS - RHS) / ElemSize;
APSInt Result = TrueResult.trunc(Info.Ctx.getIntWidth(E->getType()));
if (Result.extend(65) != TrueResult)
HandleOverflow(Info, E, TrueResult, E->getType());
return Success(Result, E);
}
// C++11 [expr.rel]p3:
// Pointers to void (after pointer conversions) can be compared, with a
// result defined as follows: If both pointers represent the same
// address or are both the null pointer value, the result is true if the
// operator is <= or >= and false otherwise; otherwise the result is
// unspecified.
// We interpret this as applying to pointers to *cv* void.
if (LHSTy->isVoidPointerType() && LHSOffset != RHSOffset &&
E->isRelationalOp())
CCEDiag(E, diag::note_constexpr_void_comparison);
// C++11 [expr.rel]p2:
// - If two pointers point to non-static data members of the same object,
    //    or to subobjects or array elements of such members, recursively, the
// pointer to the later declared member compares greater provided the
// two members have the same access control and provided their class is
// not a union.
// [...]
// - Otherwise pointer comparisons are unspecified.
if (!LHSDesignator.Invalid && !RHSDesignator.Invalid &&
E->isRelationalOp()) {
bool WasArrayIndex;
unsigned Mismatch =
FindDesignatorMismatch(getType(LHSValue.Base), LHSDesignator,
RHSDesignator, WasArrayIndex);
// At the point where the designators diverge, the comparison has a
// specified value if:
// - we are comparing array indices
// - we are comparing fields of a union, or fields with the same access
// Otherwise, the result is unspecified and thus the comparison is not a
// constant expression.
if (!WasArrayIndex && Mismatch < LHSDesignator.Entries.size() &&
Mismatch < RHSDesignator.Entries.size()) {
const FieldDecl *LF = getAsField(LHSDesignator.Entries[Mismatch]);
const FieldDecl *RF = getAsField(RHSDesignator.Entries[Mismatch]);
if (!LF && !RF)
CCEDiag(E, diag::note_constexpr_pointer_comparison_base_classes);
else if (!LF)
CCEDiag(E, diag::note_constexpr_pointer_comparison_base_field)
<< getAsBaseClass(LHSDesignator.Entries[Mismatch])
<< RF->getParent() << RF;
else if (!RF)
CCEDiag(E, diag::note_constexpr_pointer_comparison_base_field)
<< getAsBaseClass(RHSDesignator.Entries[Mismatch])
<< LF->getParent() << LF;
else if (!LF->getParent()->isUnion() &&
LF->getAccess() != RF->getAccess())
CCEDiag(E, diag::note_constexpr_pointer_comparison_differing_access)
<< LF << LF->getAccess() << RF << RF->getAccess()
<< LF->getParent();
}
}
// The comparison here must be unsigned, and performed with the same
// width as the pointer.
unsigned PtrSize = Info.Ctx.getTypeSize(LHSTy);
uint64_t CompareLHS = LHSOffset.getQuantity();
uint64_t CompareRHS = RHSOffset.getQuantity();
assert(PtrSize <= 64 && "Unexpected pointer width");
uint64_t Mask = ~0ULL >> (64 - PtrSize);
CompareLHS &= Mask;
CompareRHS &= Mask;
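    // For example, with a 32-bit pointer Mask is 0xFFFFFFFF, so the offsets
    // are reduced modulo 2^32, matching what a runtime comparison would see.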
// If there is a base and this is a relational operator, we can only
// compare pointers within the object in question; otherwise, the result
// depends on where the object is located in memory.
if (!LHSValue.Base.isNull() && E->isRelationalOp()) {
QualType BaseTy = getType(LHSValue.Base);
if (BaseTy->isIncompleteType())
return Error(E);
CharUnits Size = Info.Ctx.getTypeSizeInChars(BaseTy);
uint64_t OffsetLimit = Size.getQuantity();
if (CompareLHS > OffsetLimit || CompareRHS > OffsetLimit)
return Error(E);
}
switch (E->getOpcode()) {
default: llvm_unreachable("missing comparison operator");
case BO_LT: return Success(CompareLHS < CompareRHS, E);
case BO_GT: return Success(CompareLHS > CompareRHS, E);
case BO_LE: return Success(CompareLHS <= CompareRHS, E);
case BO_GE: return Success(CompareLHS >= CompareRHS, E);
case BO_EQ: return Success(CompareLHS == CompareRHS, E);
case BO_NE: return Success(CompareLHS != CompareRHS, E);
}
}
}
if (LHSTy->isMemberPointerType()) {
assert(E->isEqualityOp() && "unexpected member pointer operation");
assert(RHSTy->isMemberPointerType() && "invalid comparison");
MemberPtr LHSValue, RHSValue;
bool LHSOK = EvaluateMemberPointer(E->getLHS(), LHSValue, Info);
    if (!LHSOK && !Info.keepEvaluatingAfterFailure())
return false;
if (!EvaluateMemberPointer(E->getRHS(), RHSValue, Info) || !LHSOK)
return false;
// C++11 [expr.eq]p2:
// If both operands are null, they compare equal. Otherwise if only one is
// null, they compare unequal.
if (!LHSValue.getDecl() || !RHSValue.getDecl()) {
bool Equal = !LHSValue.getDecl() && !RHSValue.getDecl();
return Success(E->getOpcode() == BO_EQ ? Equal : !Equal, E);
}
// Otherwise if either is a pointer to a virtual member function, the
// result is unspecified.
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(LHSValue.getDecl()))
if (MD->isVirtual())
CCEDiag(E, diag::note_constexpr_compare_virtual_mem_ptr) << MD;
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(RHSValue.getDecl()))
if (MD->isVirtual())
CCEDiag(E, diag::note_constexpr_compare_virtual_mem_ptr) << MD;
// Otherwise they compare equal if and only if they would refer to the
// same member of the same most derived object or the same subobject if
// they were dereferenced with a hypothetical object of the associated
// class type.
bool Equal = LHSValue == RHSValue;
return Success(E->getOpcode() == BO_EQ ? Equal : !Equal, E);
}
if (LHSTy->isNullPtrType()) {
assert(E->isComparisonOp() && "unexpected nullptr operation");
assert(RHSTy->isNullPtrType() && "missing pointer conversion");
// C++11 [expr.rel]p4, [expr.eq]p3: If two operands of type std::nullptr_t
    // are compared, the result is true if the operator is <=, >= or ==, and
// false otherwise.
BinaryOperator::Opcode Opcode = E->getOpcode();
return Success(Opcode == BO_EQ || Opcode == BO_LE || Opcode == BO_GE, E);
}
assert((!LHSTy->isIntegralOrEnumerationType() ||
!RHSTy->isIntegralOrEnumerationType()) &&
"DataRecursiveIntBinOpEvaluator should have handled integral types");
// We can't continue from here for non-integral types.
return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
}
/// VisitUnaryExprOrTypeTraitExpr - Evaluate a sizeof, alignof or vec_step,
/// producing a result of the expression's type.
bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr(
const UnaryExprOrTypeTraitExpr *E) {
switch(E->getKind()) {
case UETT_AlignOf: {
if (E->isArgumentType())
return Success(GetAlignOfType(Info, E->getArgumentType()), E);
else
return Success(GetAlignOfExpr(Info, E->getArgumentExpr()), E);
}
case UETT_VecStep: {
QualType Ty = E->getTypeOfArgument();
if (Ty->isVectorType()) {
unsigned n = Ty->castAs<VectorType>()->getNumElements();
// The vec_step built-in functions that take a 3-component
// vector return 4. (OpenCL 1.1 spec 6.11.12)
if (n == 3)
n = 4;
return Success(n, E);
} else
return Success(1, E);
}
case UETT_SizeOf: {
QualType SrcTy = E->getTypeOfArgument();
// C++ [expr.sizeof]p2: "When applied to a reference or a reference type,
// the result is the size of the referenced type."
if (const ReferenceType *Ref = SrcTy->getAs<ReferenceType>())
SrcTy = Ref->getPointeeType();
CharUnits Sizeof;
if (!HandleSizeof(Info, E->getExprLoc(), SrcTy, Sizeof))
return false;
return Success(Sizeof, E);
}
case UETT_OpenMPRequiredSimdAlign:
assert(E->isArgumentType());
return Success(
Info.Ctx.toCharUnitsFromBits(
Info.Ctx.getOpenMPDefaultSimdAlign(E->getArgumentType()))
.getQuantity(),
E);
// HLSL Change Begins
case UETT_ArrayLength: {
QualType SrcTy = E->getTypeOfArgument();
assert(isa<ConstantArrayType>(SrcTy));
const ConstantArrayType *CAT = cast<ConstantArrayType>(SrcTy);
return Success(CAT->getSize(), E);
}
// HLSL Change Ends
}
llvm_unreachable("unknown expr/type trait");
}
bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) {
CharUnits Result;
unsigned n = OOE->getNumComponents();
if (n == 0)
return Error(OOE);
QualType CurrentType = OOE->getTypeSourceInfo()->getType();
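  // Walk the offsetof components in order, accumulating the byte offset
  // contributed by each array index, field, or base-class step.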
for (unsigned i = 0; i != n; ++i) {
OffsetOfExpr::OffsetOfNode ON = OOE->getComponent(i);
switch (ON.getKind()) {
case OffsetOfExpr::OffsetOfNode::Array: {
const Expr *Idx = OOE->getIndexExpr(ON.getArrayExprIndex());
APSInt IdxResult;
if (!EvaluateInteger(Idx, IdxResult, Info))
return false;
const ArrayType *AT = Info.Ctx.getAsArrayType(CurrentType);
if (!AT)
return Error(OOE);
CurrentType = AT->getElementType();
CharUnits ElementSize = Info.Ctx.getTypeSizeInChars(CurrentType);
Result += IdxResult.getSExtValue() * ElementSize;
break;
}
case OffsetOfExpr::OffsetOfNode::Field: {
FieldDecl *MemberDecl = ON.getField();
const RecordType *RT = CurrentType->getAs<RecordType>();
if (!RT)
return Error(OOE);
RecordDecl *RD = RT->getDecl();
if (RD->isInvalidDecl()) return false;
const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD);
unsigned i = MemberDecl->getFieldIndex();
assert(i < RL.getFieldCount() && "offsetof field in wrong type");
Result += Info.Ctx.toCharUnitsFromBits(RL.getFieldOffset(i));
CurrentType = MemberDecl->getType().getNonReferenceType();
break;
}
case OffsetOfExpr::OffsetOfNode::Identifier:
llvm_unreachable("dependent __builtin_offsetof");
case OffsetOfExpr::OffsetOfNode::Base: {
CXXBaseSpecifier *BaseSpec = ON.getBase();
if (BaseSpec->isVirtual())
return Error(OOE);
// Find the layout of the class whose base we are looking into.
const RecordType *RT = CurrentType->getAs<RecordType>();
if (!RT)
return Error(OOE);
RecordDecl *RD = RT->getDecl();
if (RD->isInvalidDecl()) return false;
const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD);
// Find the base class itself.
CurrentType = BaseSpec->getType();
const RecordType *BaseRT = CurrentType->getAs<RecordType>();
if (!BaseRT)
return Error(OOE);
// Add the offset to the base.
Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl()));
break;
}
}
}
return Success(Result, OOE);
}
bool IntExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
switch (E->getOpcode()) {
default:
    // Address, indirect, pre/post inc/dec, etc. are not valid constant exprs.
// See C99 6.6p3.
return Error(E);
case UO_Extension:
// FIXME: Should extension allow i-c-e extension expressions in its scope?
// If so, we could clear the diagnostic ID.
return Visit(E->getSubExpr());
case UO_Plus:
// The result is just the value.
return Visit(E->getSubExpr());
case UO_Minus: {
if (!Visit(E->getSubExpr()))
return false;
if (!Result.isInt()) return Error(E);
const APSInt &Value = Result.getInt();
if (Value.isSigned() && Value.isMinSignedValue())
HandleOverflow(Info, E, -Value.extend(Value.getBitWidth() + 1),
E->getType());
return Success(-Value, E);
}
case UO_Not: {
if (!Visit(E->getSubExpr()))
return false;
if (!Result.isInt()) return Error(E);
return Success(~Result.getInt(), E);
}
case UO_LNot: {
bool bres;
if (!EvaluateAsBooleanCondition(E->getSubExpr(), bres, Info))
return false;
return Success(!bres, E);
}
}
}
/// VisitCastExpr - This is used to evaluate implicit or explicit casts where
/// the result type is integer.
bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
const Expr *SubExpr = E->getSubExpr();
QualType DestType = E->getType();
QualType SrcType = SubExpr->getType();
switch (E->getCastKind()) {
case CK_BaseToDerived:
case CK_DerivedToBase:
case CK_UncheckedDerivedToBase:
case CK_Dynamic:
case CK_ToUnion:
case CK_ArrayToPointerDecay:
case CK_FunctionToPointerDecay:
case CK_NullToPointer:
case CK_NullToMemberPointer:
case CK_BaseToDerivedMemberPointer:
case CK_DerivedToBaseMemberPointer:
case CK_ReinterpretMemberPointer:
case CK_ConstructorConversion:
case CK_IntegralToPointer:
case CK_ToVoid:
case CK_VectorSplat:
case CK_IntegralToFloating:
case CK_FloatingCast:
case CK_CPointerToObjCPointerCast:
case CK_BlockPointerToObjCPointerCast:
case CK_AnyPointerToBlockPointerCast:
case CK_ObjCObjectLValueCast:
case CK_FloatingRealToComplex:
case CK_FloatingComplexToReal:
case CK_FloatingComplexCast:
case CK_FloatingComplexToIntegralComplex:
case CK_IntegralRealToComplex:
case CK_IntegralComplexCast:
case CK_IntegralComplexToFloatingComplex:
case CK_BuiltinFnToFnPtr:
case CK_ZeroToOCLEvent:
case CK_NonAtomicToAtomic:
case CK_AddressSpaceConversion:
llvm_unreachable("invalid cast kind for integral value");
case CK_BitCast:
case CK_Dependent:
case CK_LValueBitCast:
case CK_ARCProduceObject:
case CK_ARCConsumeObject:
case CK_ARCReclaimReturnedObject:
case CK_ARCExtendBlockObject:
case CK_CopyAndAutoreleaseBlockObject:
case CK_HLSLVectorToScalarCast: // HLSL Change
case CK_HLSLMatrixToScalarCast: // HLSL Change
case CK_FlatConversion: // HLSL Change
return Error(E);
case CK_UserDefinedConversion:
case CK_LValueToRValue:
case CK_AtomicToNonAtomic:
case CK_NoOp:
return ExprEvaluatorBaseTy::VisitCastExpr(E);
case CK_MemberPointerToBoolean:
case CK_PointerToBoolean:
case CK_IntegralToBoolean:
case CK_FloatingToBoolean:
case CK_FloatingComplexToBoolean:
case CK_IntegralComplexToBoolean: {
bool BoolResult;
if (!EvaluateAsBooleanCondition(SubExpr, BoolResult, Info))
return false;
return Success(BoolResult, E);
}
case CK_IntegralCast: {
if (!Visit(SubExpr))
return false;
if (!Result.isInt()) {
// Allow casts of address-of-label differences if they are no-ops
// or narrowing. (The narrowing case isn't actually guaranteed to
// be constant-evaluatable except in some narrow cases which are hard
// to detect here. We let it through on the assumption the user knows
// what they are doing.)
if (Result.isAddrLabelDiff())
return Info.Ctx.getTypeSize(DestType) <= Info.Ctx.getTypeSize(SrcType);
// Only allow casts of lvalues if they are lossless.
return Info.Ctx.getTypeSize(DestType) == Info.Ctx.getTypeSize(SrcType);
}
return Success(HandleIntToIntCast(Info, E, DestType, SrcType,
Result.getInt()), E);
}
case CK_PointerToIntegral: {
CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
LValue LV;
if (!EvaluatePointer(SubExpr, LV, Info))
return false;
if (LV.getLValueBase()) {
// Only allow based lvalue casts if they are lossless.
// FIXME: Allow a larger integer size than the pointer size, and allow
// narrowing back down to pointer width in subsequent integral casts.
// FIXME: Check integer type's active bits, not its type size.
if (Info.Ctx.getTypeSize(DestType) != Info.Ctx.getTypeSize(SrcType))
return Error(E);
LV.Designator.setInvalid();
LV.moveInto(Result);
return true;
}
APSInt AsInt = Info.Ctx.MakeIntValue(LV.getLValueOffset().getQuantity(),
SrcType);
return Success(HandleIntToIntCast(Info, E, DestType, SrcType, AsInt), E);
}
case CK_IntegralComplexToReal: {
ComplexValue C;
if (!EvaluateComplex(SubExpr, C, Info))
return false;
return Success(C.getComplexIntReal(), E);
}
case CK_FloatingToIntegral: {
APFloat F(0.0);
if (!EvaluateFloat(SubExpr, F, Info))
return false;
APSInt Value;
if (!HandleFloatToIntCast(Info, E, SrcType, F, DestType, Value))
return false;
return Success(Value, E);
}
}
llvm_unreachable("unknown cast resulting in integral value");
}
bool IntExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
if (E->getSubExpr()->getType()->isAnyComplexType()) {
ComplexValue LV;
if (!EvaluateComplex(E->getSubExpr(), LV, Info))
return false;
if (!LV.isComplexInt())
return Error(E);
return Success(LV.getComplexIntReal(), E);
}
return Visit(E->getSubExpr());
}
bool IntExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
if (E->getSubExpr()->getType()->isComplexIntegerType()) {
ComplexValue LV;
if (!EvaluateComplex(E->getSubExpr(), LV, Info))
return false;
if (!LV.isComplexInt())
return Error(E);
return Success(LV.getComplexIntImag(), E);
}
VisitIgnoredValue(E->getSubExpr());
return Success(0, E);
}
bool IntExprEvaluator::VisitSizeOfPackExpr(const SizeOfPackExpr *E) {
return Success(E->getPackLength(), E);
}
bool IntExprEvaluator::VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
return Success(E->getValue(), E);
}
//===----------------------------------------------------------------------===//
// Float Evaluation
//===----------------------------------------------------------------------===//
namespace {
class FloatExprEvaluator
: public ExprEvaluatorBase<FloatExprEvaluator> {
APFloat &Result;
public:
FloatExprEvaluator(EvalInfo &info, APFloat &result)
: ExprEvaluatorBaseTy(info), Result(result) {}
bool Success(const APValue &V, const Expr *e) {
Result = V.getFloat();
return true;
}
bool ZeroInitialization(const Expr *E) {
Result = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(E->getType()));
return true;
}
bool VisitCallExpr(const CallExpr *E);
bool VisitUnaryOperator(const UnaryOperator *E);
bool VisitBinaryOperator(const BinaryOperator *E);
bool VisitFloatingLiteral(const FloatingLiteral *E);
bool VisitCastExpr(const CastExpr *E);
bool VisitUnaryReal(const UnaryOperator *E);
bool VisitUnaryImag(const UnaryOperator *E);
// FIXME: Missing: array subscript of vector, member of vector
};
} // end anonymous namespace
static bool EvaluateFloat(const Expr* E, APFloat& Result, EvalInfo &Info) {
assert(E->isRValue() && E->getType()->isRealFloatingType());
return FloatExprEvaluator(Info, Result).Visit(E);
}
static bool TryEvaluateBuiltinNaN(const ASTContext &Context,
QualType ResultTy,
const Expr *Arg,
bool SNaN,
llvm::APFloat &Result) {
const StringLiteral *S = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
if (!S) return false;
const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(ResultTy);
llvm::APInt fill;
// Treat empty strings as if they were zero.
if (S->getString().empty())
fill = llvm::APInt(32, 0);
else if (S->getString().getAsInteger(0, fill))
return false;
if (Context.getTargetInfo().isNan2008()) {
if (SNaN)
Result = llvm::APFloat::getSNaN(Sem, false, &fill);
else
Result = llvm::APFloat::getQNaN(Sem, false, &fill);
} else {
    // Prior to IEEE 754-2008, architectures were allowed to choose whether
    // the first bit of their significand was set for qNaN or sNaN. MIPS chose
    // a different encoding from what became the standard in 2008, and for
    // pre-2008 revisions, MIPS interpreted sNaN-2008 as qNaN and qNaN-2008
    // as sNaN. This is now known as "legacy NaN" encoding.
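    // Under the legacy encoding the two meanings are swapped, so request
    // the opposite kind here to get the bit pattern the target expects.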
if (SNaN)
Result = llvm::APFloat::getQNaN(Sem, false, &fill);
else
Result = llvm::APFloat::getSNaN(Sem, false, &fill);
}
return true;
}
bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
switch (E->getBuiltinCallee()) {
default:
return ExprEvaluatorBaseTy::VisitCallExpr(E);
case Builtin::BI__builtin_huge_val:
case Builtin::BI__builtin_huge_valf:
case Builtin::BI__builtin_huge_vall:
case Builtin::BI__builtin_inf:
case Builtin::BI__builtin_inff:
case Builtin::BI__builtin_infl: {
const llvm::fltSemantics &Sem =
Info.Ctx.getFloatTypeSemantics(E->getType());
Result = llvm::APFloat::getInf(Sem);
return true;
}
case Builtin::BI__builtin_nans:
case Builtin::BI__builtin_nansf:
case Builtin::BI__builtin_nansl:
if (!TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0),
true, Result))
return Error(E);
return true;
case Builtin::BI__builtin_nan:
case Builtin::BI__builtin_nanf:
case Builtin::BI__builtin_nanl:
    // If this is __builtin_nan(), turn it into a NaN; otherwise we
    // can't constant fold it.
if (!TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0),
false, Result))
return Error(E);
return true;
case Builtin::BI__builtin_fabs:
case Builtin::BI__builtin_fabsf:
case Builtin::BI__builtin_fabsl:
if (!EvaluateFloat(E->getArg(0), Result, Info))
return false;
if (Result.isNegative())
Result.changeSign();
return true;
// FIXME: Builtin::BI__builtin_powi
// FIXME: Builtin::BI__builtin_powif
// FIXME: Builtin::BI__builtin_powil
case Builtin::BI__builtin_copysign:
case Builtin::BI__builtin_copysignf:
case Builtin::BI__builtin_copysignl: {
APFloat RHS(0.);
if (!EvaluateFloat(E->getArg(0), Result, Info) ||
!EvaluateFloat(E->getArg(1), RHS, Info))
return false;
Result.copySign(RHS);
return true;
}
}
}
bool FloatExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
if (E->getSubExpr()->getType()->isAnyComplexType()) {
ComplexValue CV;
if (!EvaluateComplex(E->getSubExpr(), CV, Info))
return false;
Result = CV.FloatReal;
return true;
}
return Visit(E->getSubExpr());
}
bool FloatExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
if (E->getSubExpr()->getType()->isAnyComplexType()) {
ComplexValue CV;
if (!EvaluateComplex(E->getSubExpr(), CV, Info))
return false;
Result = CV.FloatImag;
return true;
}
VisitIgnoredValue(E->getSubExpr());
const llvm::fltSemantics &Sem = Info.Ctx.getFloatTypeSemantics(E->getType());
Result = llvm::APFloat::getZero(Sem);
return true;
}
bool FloatExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
switch (E->getOpcode()) {
default: return Error(E);
case UO_Plus:
return EvaluateFloat(E->getSubExpr(), Result, Info);
case UO_Minus:
if (!EvaluateFloat(E->getSubExpr(), Result, Info))
return false;
Result.changeSign();
return true;
}
}
bool FloatExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma)
return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
APFloat RHS(0.0);
bool LHSOK = EvaluateFloat(E->getLHS(), Result, Info);
if (!LHSOK && !Info.keepEvaluatingAfterFailure())
return false;
return EvaluateFloat(E->getRHS(), RHS, Info) && LHSOK &&
handleFloatFloatBinOp(Info, E, Result, E->getOpcode(), RHS);
}
bool FloatExprEvaluator::VisitFloatingLiteral(const FloatingLiteral *E) {
Result = E->getValue();
return true;
}
bool FloatExprEvaluator::VisitCastExpr(const CastExpr *E) {
const Expr* SubExpr = E->getSubExpr();
switch (E->getCastKind()) {
default:
return ExprEvaluatorBaseTy::VisitCastExpr(E);
case CK_IntegralToFloating: {
APSInt IntResult;
return EvaluateInteger(SubExpr, IntResult, Info) &&
HandleIntToFloatCast(Info, E, SubExpr->getType(), IntResult,
E->getType(), Result);
}
case CK_FloatingCast: {
if (!Visit(SubExpr))
return false;
return HandleFloatToFloatCast(Info, E, SubExpr->getType(), E->getType(),
Result);
}
case CK_FloatingComplexToReal: {
ComplexValue V;
if (!EvaluateComplex(SubExpr, V, Info))
return false;
Result = V.getComplexFloatReal();
return true;
}
}
}
//===----------------------------------------------------------------------===//
// Complex Evaluation (for float and integer)
//===----------------------------------------------------------------------===//
namespace {
class ComplexExprEvaluator
: public ExprEvaluatorBase<ComplexExprEvaluator> {
ComplexValue &Result;
public:
ComplexExprEvaluator(EvalInfo &info, ComplexValue &Result)
: ExprEvaluatorBaseTy(info), Result(Result) {}
bool Success(const APValue &V, const Expr *e) {
Result.setFrom(V);
return true;
}
bool ZeroInitialization(const Expr *E);
//===--------------------------------------------------------------------===//
// Visitor Methods
//===--------------------------------------------------------------------===//
bool VisitImaginaryLiteral(const ImaginaryLiteral *E);
bool VisitCastExpr(const CastExpr *E);
bool VisitBinaryOperator(const BinaryOperator *E);
bool VisitUnaryOperator(const UnaryOperator *E);
bool VisitInitListExpr(const InitListExpr *E);
};
} // end anonymous namespace
static bool EvaluateComplex(const Expr *E, ComplexValue &Result,
EvalInfo &Info) {
assert(E->isRValue() && E->getType()->isAnyComplexType());
return ComplexExprEvaluator(Info, Result).Visit(E);
}
bool ComplexExprEvaluator::ZeroInitialization(const Expr *E) {
QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
if (ElemTy->isRealFloatingType()) {
Result.makeComplexFloat();
APFloat Zero = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(ElemTy));
Result.FloatReal = Zero;
Result.FloatImag = Zero;
} else {
Result.makeComplexInt();
APSInt Zero = Info.Ctx.MakeIntValue(0, ElemTy);
Result.IntReal = Zero;
Result.IntImag = Zero;
}
return true;
}
bool ComplexExprEvaluator::VisitImaginaryLiteral(const ImaginaryLiteral *E) {
const Expr* SubExpr = E->getSubExpr();
if (SubExpr->getType()->isRealFloatingType()) {
Result.makeComplexFloat();
APFloat &Imag = Result.FloatImag;
if (!EvaluateFloat(SubExpr, Imag, Info))
return false;
Result.FloatReal = APFloat(Imag.getSemantics());
return true;
} else {
assert(SubExpr->getType()->isIntegerType() &&
"Unexpected imaginary literal.");
Result.makeComplexInt();
APSInt &Imag = Result.IntImag;
if (!EvaluateInteger(SubExpr, Imag, Info))
return false;
Result.IntReal = APSInt(Imag.getBitWidth(), !Imag.isSigned());
return true;
}
}
bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
switch (E->getCastKind()) {
case CK_BitCast:
case CK_BaseToDerived:
case CK_DerivedToBase:
case CK_UncheckedDerivedToBase:
case CK_Dynamic:
case CK_ToUnion:
case CK_ArrayToPointerDecay:
case CK_FunctionToPointerDecay:
case CK_NullToPointer:
case CK_NullToMemberPointer:
case CK_BaseToDerivedMemberPointer:
case CK_DerivedToBaseMemberPointer:
case CK_MemberPointerToBoolean:
case CK_ReinterpretMemberPointer:
case CK_ConstructorConversion:
case CK_IntegralToPointer:
case CK_PointerToIntegral:
case CK_PointerToBoolean:
case CK_ToVoid:
case CK_VectorSplat:
case CK_IntegralCast:
case CK_IntegralToBoolean:
case CK_IntegralToFloating:
case CK_FloatingToIntegral:
case CK_FloatingToBoolean:
case CK_FloatingCast:
case CK_CPointerToObjCPointerCast:
case CK_BlockPointerToObjCPointerCast:
case CK_AnyPointerToBlockPointerCast:
case CK_ObjCObjectLValueCast:
case CK_FloatingComplexToReal:
case CK_FloatingComplexToBoolean:
case CK_IntegralComplexToReal:
case CK_IntegralComplexToBoolean:
case CK_ARCProduceObject:
case CK_ARCConsumeObject:
case CK_ARCReclaimReturnedObject:
case CK_ARCExtendBlockObject:
case CK_CopyAndAutoreleaseBlockObject:
case CK_BuiltinFnToFnPtr:
case CK_ZeroToOCLEvent:
case CK_NonAtomicToAtomic:
case CK_AddressSpaceConversion:
llvm_unreachable("invalid cast kind for complex value");
case CK_LValueToRValue:
case CK_AtomicToNonAtomic:
case CK_NoOp:
return ExprEvaluatorBaseTy::VisitCastExpr(E);
case CK_Dependent:
case CK_LValueBitCast:
case CK_UserDefinedConversion:
return Error(E);
case CK_FloatingRealToComplex: {
APFloat &Real = Result.FloatReal;
if (!EvaluateFloat(E->getSubExpr(), Real, Info))
return false;
Result.makeComplexFloat();
Result.FloatImag = APFloat(Real.getSemantics());
return true;
}
case CK_FloatingComplexCast: {
if (!Visit(E->getSubExpr()))
return false;
QualType To = E->getType()->getAs<ComplexType>()->getElementType();
QualType From
= E->getSubExpr()->getType()->getAs<ComplexType>()->getElementType();
return HandleFloatToFloatCast(Info, E, From, To, Result.FloatReal) &&
HandleFloatToFloatCast(Info, E, From, To, Result.FloatImag);
}
case CK_FloatingComplexToIntegralComplex: {
if (!Visit(E->getSubExpr()))
return false;
QualType To = E->getType()->getAs<ComplexType>()->getElementType();
QualType From
= E->getSubExpr()->getType()->getAs<ComplexType>()->getElementType();
Result.makeComplexInt();
return HandleFloatToIntCast(Info, E, From, Result.FloatReal,
To, Result.IntReal) &&
HandleFloatToIntCast(Info, E, From, Result.FloatImag,
To, Result.IntImag);
}
case CK_IntegralRealToComplex: {
APSInt &Real = Result.IntReal;
if (!EvaluateInteger(E->getSubExpr(), Real, Info))
return false;
Result.makeComplexInt();
Result.IntImag = APSInt(Real.getBitWidth(), !Real.isSigned());
return true;
}
case CK_IntegralComplexCast: {
if (!Visit(E->getSubExpr()))
return false;
QualType To = E->getType()->getAs<ComplexType>()->getElementType();
QualType From
= E->getSubExpr()->getType()->getAs<ComplexType>()->getElementType();
Result.IntReal = HandleIntToIntCast(Info, E, To, From, Result.IntReal);
Result.IntImag = HandleIntToIntCast(Info, E, To, From, Result.IntImag);
return true;
}
case CK_IntegralComplexToFloatingComplex: {
if (!Visit(E->getSubExpr()))
return false;
QualType To = E->getType()->castAs<ComplexType>()->getElementType();
QualType From
= E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType();
Result.makeComplexFloat();
return HandleIntToFloatCast(Info, E, From, Result.IntReal,
To, Result.FloatReal) &&
HandleIntToFloatCast(Info, E, From, Result.IntImag,
To, Result.FloatImag);
}
}
llvm_unreachable("unknown cast resulting in complex value");
}
bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma)
return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
// Track whether the LHS or RHS is real at the type system level. When this is
// the case we can simplify our evaluation strategy.
bool LHSReal = false, RHSReal = false;
bool LHSOK;
if (E->getLHS()->getType()->isRealFloatingType()) {
LHSReal = true;
APFloat &Real = Result.FloatReal;
LHSOK = EvaluateFloat(E->getLHS(), Real, Info);
if (LHSOK) {
Result.makeComplexFloat();
Result.FloatImag = APFloat(Real.getSemantics());
}
} else {
LHSOK = Visit(E->getLHS());
}
if (!LHSOK && !Info.keepEvaluatingAfterFailure())
return false;
ComplexValue RHS;
if (E->getRHS()->getType()->isRealFloatingType()) {
RHSReal = true;
APFloat &Real = RHS.FloatReal;
if (!EvaluateFloat(E->getRHS(), Real, Info) || !LHSOK)
return false;
RHS.makeComplexFloat();
RHS.FloatImag = APFloat(Real.getSemantics());
} else if (!EvaluateComplex(E->getRHS(), RHS, Info) || !LHSOK)
return false;
assert(!(LHSReal && RHSReal) &&
"Cannot have both operands of a complex operation be real.");
switch (E->getOpcode()) {
default: return Error(E);
case BO_Add:
if (Result.isComplexFloat()) {
Result.getComplexFloatReal().add(RHS.getComplexFloatReal(),
APFloat::rmNearestTiesToEven);
if (LHSReal)
Result.getComplexFloatImag() = RHS.getComplexFloatImag();
else if (!RHSReal)
Result.getComplexFloatImag().add(RHS.getComplexFloatImag(),
APFloat::rmNearestTiesToEven);
} else {
Result.getComplexIntReal() += RHS.getComplexIntReal();
Result.getComplexIntImag() += RHS.getComplexIntImag();
}
break;
case BO_Sub:
if (Result.isComplexFloat()) {
Result.getComplexFloatReal().subtract(RHS.getComplexFloatReal(),
APFloat::rmNearestTiesToEven);
if (LHSReal) {
Result.getComplexFloatImag() = RHS.getComplexFloatImag();
Result.getComplexFloatImag().changeSign();
} else if (!RHSReal) {
Result.getComplexFloatImag().subtract(RHS.getComplexFloatImag(),
APFloat::rmNearestTiesToEven);
}
} else {
Result.getComplexIntReal() -= RHS.getComplexIntReal();
Result.getComplexIntImag() -= RHS.getComplexIntImag();
}
break;
case BO_Mul:
if (Result.isComplexFloat()) {
// This is an implementation of complex multiplication according to the
      // constraints laid out in C11 Annex G. The implementation uses the
// following naming scheme:
// (a + ib) * (c + id)
ComplexValue LHS = Result;
APFloat &A = LHS.getComplexFloatReal();
APFloat &B = LHS.getComplexFloatImag();
APFloat &C = RHS.getComplexFloatReal();
APFloat &D = RHS.getComplexFloatImag();
APFloat &ResR = Result.getComplexFloatReal();
APFloat &ResI = Result.getComplexFloatImag();
if (LHSReal) {
assert(!RHSReal && "Cannot have two real operands for a complex op!");
ResR = A * C;
ResI = A * D;
} else if (RHSReal) {
ResR = C * A;
ResI = C * B;
} else {
// In the fully general case, we need to handle NaNs and infinities
// robustly.
APFloat AC = A * C;
APFloat BD = B * D;
APFloat AD = A * D;
APFloat BC = B * C;
ResR = AC - BD;
ResI = AD + BC;
if (ResR.isNaN() && ResI.isNaN()) {
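          // Both parts came out NaN. Per C11 Annex G, an operand with an
          // infinite part must still produce an infinite result, so
          // classify the operands and recompute the products below.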
bool Recalc = false;
if (A.isInfinity() || B.isInfinity()) {
A = APFloat::copySign(
APFloat(A.getSemantics(), A.isInfinity() ? 1 : 0), A);
B = APFloat::copySign(
APFloat(B.getSemantics(), B.isInfinity() ? 1 : 0), B);
if (C.isNaN())
C = APFloat::copySign(APFloat(C.getSemantics()), C);
if (D.isNaN())
D = APFloat::copySign(APFloat(D.getSemantics()), D);
Recalc = true;
}
if (C.isInfinity() || D.isInfinity()) {
C = APFloat::copySign(
APFloat(C.getSemantics(), C.isInfinity() ? 1 : 0), C);
D = APFloat::copySign(
APFloat(D.getSemantics(), D.isInfinity() ? 1 : 0), D);
if (A.isNaN())
A = APFloat::copySign(APFloat(A.getSemantics()), A);
if (B.isNaN())
B = APFloat::copySign(APFloat(B.getSemantics()), B);
Recalc = true;
}
if (!Recalc && (AC.isInfinity() || BD.isInfinity() ||
AD.isInfinity() || BC.isInfinity())) {
if (A.isNaN())
A = APFloat::copySign(APFloat(A.getSemantics()), A);
if (B.isNaN())
B = APFloat::copySign(APFloat(B.getSemantics()), B);
if (C.isNaN())
C = APFloat::copySign(APFloat(C.getSemantics()), C);
if (D.isNaN())
D = APFloat::copySign(APFloat(D.getSemantics()), D);
Recalc = true;
}
if (Recalc) {
ResR = APFloat::getInf(A.getSemantics()) * (A * C - B * D);
ResI = APFloat::getInf(A.getSemantics()) * (A * D + B * C);
}
}
}
} else {
ComplexValue LHS = Result;
Result.getComplexIntReal() =
(LHS.getComplexIntReal() * RHS.getComplexIntReal() -
LHS.getComplexIntImag() * RHS.getComplexIntImag());
Result.getComplexIntImag() =
(LHS.getComplexIntReal() * RHS.getComplexIntImag() +
LHS.getComplexIntImag() * RHS.getComplexIntReal());
}
break;
case BO_Div:
if (Result.isComplexFloat()) {
// This is an implementation of complex division according to the
      // constraints laid out in C11 Annex G. The implementation uses the
// following naming scheme:
// (a + ib) / (c + id)
ComplexValue LHS = Result;
APFloat &A = LHS.getComplexFloatReal();
APFloat &B = LHS.getComplexFloatImag();
APFloat &C = RHS.getComplexFloatReal();
APFloat &D = RHS.getComplexFloatImag();
APFloat &ResR = Result.getComplexFloatReal();
APFloat &ResI = Result.getComplexFloatImag();
if (RHSReal) {
ResR = A / C;
ResI = B / C;
} else {
if (LHSReal) {
// No real optimizations we can do here, stub out with zero.
B = APFloat::getZero(A.getSemantics());
}
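        // Rescale the denominator by a power of two to avoid overflow and
        // harmful underflow when forming C*C + D*D; the scale factor is
        // folded back into the quotient via scalbn below.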
int DenomLogB = 0;
APFloat MaxCD = maxnum(abs(C), abs(D));
if (MaxCD.isFinite()) {
DenomLogB = ilogb(MaxCD);
C = scalbn(C, -DenomLogB);
D = scalbn(D, -DenomLogB);
}
APFloat Denom = C * C + D * D;
ResR = scalbn((A * C + B * D) / Denom, -DenomLogB);
ResI = scalbn((B * C - A * D) / Denom, -DenomLogB);
if (ResR.isNaN() && ResI.isNaN()) {
if (Denom.isPosZero() && (!A.isNaN() || !B.isNaN())) {
ResR = APFloat::getInf(ResR.getSemantics(), C.isNegative()) * A;
ResI = APFloat::getInf(ResR.getSemantics(), C.isNegative()) * B;
} else if ((A.isInfinity() || B.isInfinity()) && C.isFinite() &&
D.isFinite()) {
A = APFloat::copySign(
APFloat(A.getSemantics(), A.isInfinity() ? 1 : 0), A);
B = APFloat::copySign(
APFloat(B.getSemantics(), B.isInfinity() ? 1 : 0), B);
ResR = APFloat::getInf(ResR.getSemantics()) * (A * C + B * D);
ResI = APFloat::getInf(ResI.getSemantics()) * (B * C - A * D);
} else if (MaxCD.isInfinity() && A.isFinite() && B.isFinite()) {
C = APFloat::copySign(
APFloat(C.getSemantics(), C.isInfinity() ? 1 : 0), C);
D = APFloat::copySign(
APFloat(D.getSemantics(), D.isInfinity() ? 1 : 0), D);
ResR = APFloat::getZero(ResR.getSemantics()) * (A * C + B * D);
ResI = APFloat::getZero(ResI.getSemantics()) * (B * C - A * D);
}
}
}
} else {
if (RHS.getComplexIntReal() == 0 && RHS.getComplexIntImag() == 0)
return Error(E, diag::note_expr_divide_by_zero);
ComplexValue LHS = Result;
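      // Textbook formula: (a+ib)/(c+id) = ((ac+bd) + i(bc-ad)) / (c*c+d*d).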
APSInt Den = RHS.getComplexIntReal() * RHS.getComplexIntReal() +
RHS.getComplexIntImag() * RHS.getComplexIntImag();
Result.getComplexIntReal() =
(LHS.getComplexIntReal() * RHS.getComplexIntReal() +
LHS.getComplexIntImag() * RHS.getComplexIntImag()) / Den;
Result.getComplexIntImag() =
(LHS.getComplexIntImag() * RHS.getComplexIntReal() -
LHS.getComplexIntReal() * RHS.getComplexIntImag()) / Den;
}
break;
}
return true;
}
bool ComplexExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
// Get the operand value into 'Result'.
if (!Visit(E->getSubExpr()))
return false;
switch (E->getOpcode()) {
default:
return Error(E);
case UO_Extension:
return true;
case UO_Plus:
// The result is always just the subexpr.
return true;
case UO_Minus:
if (Result.isComplexFloat()) {
Result.getComplexFloatReal().changeSign();
Result.getComplexFloatImag().changeSign();
}
else {
Result.getComplexIntReal() = -Result.getComplexIntReal();
Result.getComplexIntImag() = -Result.getComplexIntImag();
}
return true;
case UO_Not:
if (Result.isComplexFloat())
Result.getComplexFloatImag().changeSign();
else
Result.getComplexIntImag() = -Result.getComplexIntImag();
return true;
}
}
bool ComplexExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
if (E->getNumInits() == 2) {
if (E->getType()->isComplexType()) {
Result.makeComplexFloat();
if (!EvaluateFloat(E->getInit(0), Result.FloatReal, Info))
return false;
if (!EvaluateFloat(E->getInit(1), Result.FloatImag, Info))
return false;
} else {
Result.makeComplexInt();
if (!EvaluateInteger(E->getInit(0), Result.IntReal, Info))
return false;
if (!EvaluateInteger(E->getInit(1), Result.IntImag, Info))
return false;
}
return true;
}
return ExprEvaluatorBaseTy::VisitInitListExpr(E);
}
//===----------------------------------------------------------------------===//
// Atomic expression evaluation, essentially just handling the NonAtomicToAtomic
// implicit conversion.
//===----------------------------------------------------------------------===//
namespace {
class AtomicExprEvaluator :
public ExprEvaluatorBase<AtomicExprEvaluator> {
APValue &Result;
public:
AtomicExprEvaluator(EvalInfo &Info, APValue &Result)
: ExprEvaluatorBaseTy(Info), Result(Result) {}
bool Success(const APValue &V, const Expr *E) {
Result = V;
return true;
}
bool ZeroInitialization(const Expr *E) {
ImplicitValueInitExpr VIE(
E->getType()->castAs<AtomicType>()->getValueType());
return Evaluate(Result, Info, &VIE);
}
bool VisitCastExpr(const CastExpr *E) {
switch (E->getCastKind()) {
default:
return ExprEvaluatorBaseTy::VisitCastExpr(E);
case CK_NonAtomicToAtomic:
return Evaluate(Result, Info, E->getSubExpr());
}
}
};
} // end anonymous namespace
static bool EvaluateAtomic(const Expr *E, APValue &Result, EvalInfo &Info) {
assert(E->isRValue() && E->getType()->isAtomicType());
return AtomicExprEvaluator(Info, Result).Visit(E);
}
//===----------------------------------------------------------------------===//
// Void expression evaluation, primarily for a cast to void on the LHS of a
// comma operator
//===----------------------------------------------------------------------===//
namespace {
class VoidExprEvaluator
: public ExprEvaluatorBase<VoidExprEvaluator> {
public:
VoidExprEvaluator(EvalInfo &Info) : ExprEvaluatorBaseTy(Info) {}
bool Success(const APValue &V, const Expr *e) { return true; }
bool VisitCastExpr(const CastExpr *E) {
switch (E->getCastKind()) {
default:
return ExprEvaluatorBaseTy::VisitCastExpr(E);
case CK_ToVoid:
VisitIgnoredValue(E->getSubExpr());
return true;
}
}
bool VisitCallExpr(const CallExpr *E) {
switch (E->getBuiltinCallee()) {
default:
return ExprEvaluatorBaseTy::VisitCallExpr(E);
case Builtin::BI__assume:
case Builtin::BI__builtin_assume:
// The argument is not evaluated!
return true;
}
}
};
} // end anonymous namespace
static bool EvaluateVoid(const Expr *E, EvalInfo &Info) {
assert(E->isRValue() && E->getType()->isVoidType());
return VoidExprEvaluator(Info).Visit(E);
}
//===----------------------------------------------------------------------===//
// Top level Expr::EvaluateAsRValue method.
//===----------------------------------------------------------------------===//
static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E) {
// In C, function designators are not lvalues, but we evaluate them as if they
// are.
// HLSL Change Begins.
if (Info.getLangOpts().HLSL) {
if (E->isRValue() && hlsl::IsHLSLVecType(E->getType()) && EvaluateVector(E, Result, Info))
return true;
if (E->getStmtClass() == Stmt::InitListExprClass && !E->getType()->isScalarType())
return false;
if (E->getStmtClass() == Stmt::DeclRefExprClass) {
const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E);
const ValueDecl *VD = DRE->getDecl();
// External variable is in cbuffer, cannot use as immediate.
if (VD->hasExternalFormalLinkage() &&
!isa<EnumConstantDecl>(VD))
return false;
}
}
// HLSL Change Ends.
QualType T = E->getType();
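  // Dispatch to the evaluator matching the expression's type. Aggregates
  // (arrays and records) are evaluated into a temporary created on the
  // current call frame.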
if (E->isGLValue() || T->isFunctionType()) {
LValue LV;
if (!EvaluateLValue(E, LV, Info))
return false;
LV.moveInto(Result);
} else if (T->isVectorType()) {
if (!EvaluateVector(E, Result, Info))
return false;
} else if (T->isIntegralOrEnumerationType()) {
if (!IntExprEvaluator(Info, Result).Visit(E))
return false;
} else if (T->hasPointerRepresentation()) {
LValue LV;
if (!EvaluatePointer(E, LV, Info))
return false;
LV.moveInto(Result);
} else if (T->isRealFloatingType()) {
llvm::APFloat F(0.0);
if (!EvaluateFloat(E, F, Info))
return false;
Result = APValue(F);
} else if (T->isAnyComplexType()) {
ComplexValue C;
if (!EvaluateComplex(E, C, Info))
return false;
C.moveInto(Result);
} else if (T->isMemberPointerType()) {
MemberPtr P;
if (!EvaluateMemberPointer(E, P, Info))
return false;
P.moveInto(Result);
return true;
} else if (T->isArrayType()) {
LValue LV;
LV.set(E, Info.CurrentCall->Index);
APValue &Value = Info.CurrentCall->createTemporary(E, false);
if (!EvaluateArray(E, LV, Value, Info))
return false;
Result = Value;
} else if (T->isRecordType()) {
LValue LV;
LV.set(E, Info.CurrentCall->Index);
APValue &Value = Info.CurrentCall->createTemporary(E, false);
if (!EvaluateRecord(E, LV, Value, Info))
return false;
Result = Value;
} else if (T->isVoidType()) {
if (!Info.getLangOpts().CPlusPlus11)
Info.CCEDiag(E, diag::note_constexpr_nonliteral)
<< E->getType();
if (!EvaluateVoid(E, Info))
return false;
} else if (T->isAtomicType()) {
if (!EvaluateAtomic(E, Result, Info))
return false;
} else if (Info.getLangOpts().CPlusPlus11) {
Info.Diag(E, diag::note_constexpr_nonliteral) << E->getType();
return false;
} else {
Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
return false;
}
return true;
}
/// EvaluateInPlace - Evaluate an expression in-place in an APValue. In some
/// cases, the in-place evaluation is essential, since later initializers for
/// an object can indirectly refer to subobjects which were initialized earlier.
static bool EvaluateInPlace(APValue &Result, EvalInfo &Info, const LValue &This,
const Expr *E, bool AllowNonLiteralTypes) {
assert(!E->isValueDependent());
if (!AllowNonLiteralTypes && !CheckLiteralType(Info, E, &This))
return false;
if (E->isRValue()) {
// Evaluate arrays and record types in-place, so that later initializers can
// refer to earlier-initialized members of the object.
if (E->getType()->isArrayType())
return EvaluateArray(E, This, Result, Info);
else if (E->getType()->isRecordType())
return EvaluateRecord(E, This, Result, Info);
}
// For any other type, in-place evaluation is unimportant.
return Evaluate(Result, Info, E);
}
/// EvaluateAsRValue - Try to evaluate this expression, performing an implicit
/// lvalue-to-rvalue cast if it is an lvalue.
static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result) {
if (E->getType().isNull())
return false;
if (!CheckLiteralType(Info, E))
return false;
if (!::Evaluate(Result, Info, E))
return false;
if (E->isGLValue()) {
LValue LV;
LV.setFrom(Info.Ctx, Result);
if (!handleLValueToRValueConversion(Info, E, E->getType(), LV, Result))
return false;
}
// Check this core constant expression is a constant expression.
return CheckConstantExpression(Info, E->getExprLoc(), E->getType(), Result);
}
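/// FastEvaluateAsRValue - Try a few cheap checks before running the full
/// evaluator. Returns true when a verdict was reached (recorded in IsConst),
/// false when the caller must evaluate normally.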
static bool FastEvaluateAsRValue(const Expr *Exp, Expr::EvalResult &Result,
const ASTContext &Ctx, bool &IsConst) {
// Fast-path evaluations of integer literals, since we sometimes see files
// containing vast quantities of these.
if (const IntegerLiteral *L = dyn_cast<IntegerLiteral>(Exp)) {
Result.Val = APValue(APSInt(L->getValue(),
L->getType()->isUnsignedIntegerType()));
IsConst = true;
return true;
}
// This case should be rare, but we need to check it before we check on
// the type below.
if (Exp->getType().isNull()) {
IsConst = false;
return true;
}
// FIXME: Evaluating values of large array and record types can cause
// performance problems. Only do so in C++11 for now.
if (Exp->isRValue() && (Exp->getType()->isArrayType() ||
Exp->getType()->isRecordType()) &&
!hlsl::IsHLSLVecMatType(Exp->getType()) && // HLSL Change
!Ctx.getLangOpts().CPlusPlus11) {
IsConst = false;
return true;
}
return false;
}
/// EvaluateAsRValue - Return true if this is a constant which we can fold using
/// any crazy technique (that has nothing to do with language standards) that
/// we want to. If this function returns true, it returns the folded constant
/// in Result. If this expression is a glvalue, an lvalue-to-rvalue conversion
/// will be applied to the result.
bool Expr::EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx) const {
bool IsConst;
if (FastEvaluateAsRValue(this, Result, Ctx, IsConst))
return IsConst;
EvalInfo Info(Ctx, Result, EvalInfo::EM_IgnoreSideEffects);
return ::EvaluateAsRValue(Info, this, Result.Val);
}
bool Expr::EvaluateAsBooleanCondition(bool &Result,
const ASTContext &Ctx) const {
EvalResult Scratch;
return EvaluateAsRValue(Scratch, Ctx) &&
HandleConversionToBool(Scratch.Val, Result);
}
bool Expr::EvaluateAsInt(APSInt &Result, const ASTContext &Ctx,
SideEffectsKind AllowSideEffects) const {
if (!getType()->isIntegralOrEnumerationType())
return false;
EvalResult ExprResult;
if (!EvaluateAsRValue(ExprResult, Ctx) || !ExprResult.Val.isInt() ||
(!AllowSideEffects && ExprResult.HasSideEffects))
return false;
Result = ExprResult.Val.getInt();
return true;
}
bool Expr::EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx) const {
EvalInfo Info(Ctx, Result, EvalInfo::EM_ConstantFold);
LValue LV;
if (!EvaluateLValue(this, LV, Info) || Result.HasSideEffects ||
!CheckLValueConstantExpression(Info, getExprLoc(),
Ctx.getLValueReferenceType(getType()), LV))
return false;
LV.moveInto(Result.Val);
return true;
}
bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
const VarDecl *VD,
SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
// FIXME: Evaluating initializers for large array and record types can cause
// performance problems. Only do so in C++11 for now.
if (isRValue() && (getType()->isArrayType() || getType()->isRecordType()) &&
!Ctx.getLangOpts().CPlusPlus11)
return false;
Expr::EvalStatus EStatus;
EStatus.Diag = &Notes;
EvalInfo InitInfo(Ctx, EStatus, EvalInfo::EM_ConstantFold);
InitInfo.setEvaluatingDecl(VD, Value);
LValue LVal;
LVal.set(VD);
// C++11 [basic.start.init]p2:
// Variables with static storage duration or thread storage duration shall be
// zero-initialized before any other initialization takes place.
// This behavior is not present in C.
if (Ctx.getLangOpts().CPlusPlus && !VD->hasLocalStorage() &&
!VD->getType()->isReferenceType()) {
ImplicitValueInitExpr VIE(VD->getType());
if (!EvaluateInPlace(Value, InitInfo, LVal, &VIE,
/*AllowNonLiteralTypes=*/true))
return false;
}
if (!EvaluateInPlace(Value, InitInfo, LVal, this,
/*AllowNonLiteralTypes=*/true) ||
EStatus.HasSideEffects)
return false;
return CheckConstantExpression(InitInfo, VD->getLocation(), VD->getType(),
Value);
}
/// isEvaluatable - Call EvaluateAsRValue to see if this expression can be
/// constant folded, but discard the result.
bool Expr::isEvaluatable(const ASTContext &Ctx) const {
EvalResult Result;
return EvaluateAsRValue(Result, Ctx) && !Result.HasSideEffects;
}
APSInt Expr::EvaluateKnownConstInt(const ASTContext &Ctx,
SmallVectorImpl<PartialDiagnosticAt> *Diag) const {
EvalResult EvalResult;
EvalResult.Diag = Diag;
bool Result = EvaluateAsRValue(EvalResult, Ctx);
(void)Result;
assert(Result && "Could not evaluate expression");
assert(EvalResult.Val.isInt() && "Expression did not evaluate to integer");
return EvalResult.Val.getInt();
}
void Expr::EvaluateForOverflow(const ASTContext &Ctx) const {
bool IsConst;
EvalResult EvalResult;
if (!FastEvaluateAsRValue(this, EvalResult, Ctx, IsConst)) {
EvalInfo Info(Ctx, EvalResult, EvalInfo::EM_EvaluateForOverflow);
(void)::EvaluateAsRValue(Info, this, EvalResult.Val);
}
}
bool Expr::EvalResult::isGlobalLValue() const {
assert(Val.isLValue());
return IsGlobalLValue(Val.getLValueBase());
}
/// isIntegerConstantExpr - This recursive routine will test if an expression is
/// an integer constant expression.
/// FIXME: Pass up a reason why! Invalid operation in i-c-e, division by zero,
/// comma, etc.
// CheckICE - This function does the fundamental ICE checking: the returned
// ICEDiag contains an ICEKind indicating whether the expression is an ICE,
// and a (possibly null) SourceLocation indicating the location of the problem.
//
// Note that to reduce code duplication, this helper does no evaluation
// itself; the caller checks whether the expression is evaluatable, and
// in the rare cases where CheckICE actually cares about the evaluated
// value, it calls into Evaluate.
namespace {
enum ICEKind {
/// This expression is an ICE.
IK_ICE,
/// This expression is not an ICE, but if it isn't evaluated, it's
/// a legal subexpression for an ICE. This return value is used to handle
/// the comma operator in C99 mode, and non-constant subexpressions.
IK_ICEIfUnevaluated,
/// This expression is not an ICE, and is not a legal subexpression for one.
IK_NotICE
};
struct ICEDiag {
ICEKind Kind;
SourceLocation Loc;
ICEDiag(ICEKind IK, SourceLocation l) : Kind(IK), Loc(l) {}
};
}
static ICEDiag NoDiag() { return ICEDiag(IK_ICE, SourceLocation()); }
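// Worst - Returns the more severe of two ICE verdicts; the ICEKind
// enumerators are ordered so that larger values are worse.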
static ICEDiag Worst(ICEDiag A, ICEDiag B) { return A.Kind >= B.Kind ? A : B; }
static ICEDiag CheckEvalInICE(const Expr* E, const ASTContext &Ctx) {
Expr::EvalResult EVResult;
if (!E->EvaluateAsRValue(EVResult, Ctx) || EVResult.HasSideEffects ||
!EVResult.Val.isInt())
return ICEDiag(IK_NotICE, E->getLocStart());
return NoDiag();
}
static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
assert(!E->isValueDependent() && "Should not see value dependent exprs!");
if (!E->getType()->isIntegralOrEnumerationType())
return ICEDiag(IK_NotICE, E->getLocStart());
switch (E->getStmtClass()) {
#define ABSTRACT_STMT(Node)
#define STMT(Node, Base) case Expr::Node##Class:
#define EXPR(Node, Base)
#include "clang/AST/StmtNodes.inc"
case Expr::PredefinedExprClass:
case Expr::FloatingLiteralClass:
case Expr::ImaginaryLiteralClass:
case Expr::StringLiteralClass:
case Expr::ArraySubscriptExprClass:
case Expr::MemberExprClass:
case Expr::CompoundAssignOperatorClass:
case Expr::CompoundLiteralExprClass:
case Expr::ExtVectorElementExprClass:
case Expr::ExtMatrixElementExprClass: // HLSL Change
case Expr::HLSLVectorElementExprClass: // HLSL Change
case Expr::DesignatedInitExprClass:
case Expr::NoInitExprClass:
case Expr::DesignatedInitUpdateExprClass:
case Expr::ImplicitValueInitExprClass:
case Expr::ParenListExprClass:
case Expr::VAArgExprClass:
case Expr::AddrLabelExprClass:
case Expr::StmtExprClass:
case Expr::CXXMemberCallExprClass:
case Expr::CUDAKernelCallExprClass:
case Expr::CXXDynamicCastExprClass:
case Expr::CXXTypeidExprClass:
case Expr::CXXUuidofExprClass:
case Expr::MSPropertyRefExprClass:
case Expr::CXXNullPtrLiteralExprClass:
case Expr::UserDefinedLiteralClass:
case Expr::CXXThisExprClass:
case Expr::CXXThrowExprClass:
case Expr::CXXNewExprClass:
case Expr::CXXDeleteExprClass:
case Expr::CXXPseudoDestructorExprClass:
case Expr::UnresolvedLookupExprClass:
case Expr::TypoExprClass:
case Expr::DependentScopeDeclRefExprClass:
case Expr::CXXConstructExprClass:
case Expr::CXXStdInitializerListExprClass:
case Expr::CXXBindTemporaryExprClass:
case Expr::ExprWithCleanupsClass:
case Expr::CXXTemporaryObjectExprClass:
case Expr::CXXUnresolvedConstructExprClass:
case Expr::CXXDependentScopeMemberExprClass:
case Expr::UnresolvedMemberExprClass:
case Expr::ObjCStringLiteralClass:
case Expr::ObjCBoxedExprClass:
case Expr::ObjCArrayLiteralClass:
case Expr::ObjCDictionaryLiteralClass:
case Expr::ObjCEncodeExprClass:
case Expr::ObjCMessageExprClass:
case Expr::ObjCSelectorExprClass:
case Expr::ObjCProtocolExprClass:
case Expr::ObjCIvarRefExprClass:
case Expr::ObjCPropertyRefExprClass:
case Expr::ObjCSubscriptRefExprClass:
case Expr::ObjCIsaExprClass:
case Expr::ShuffleVectorExprClass:
case Expr::ConvertVectorExprClass:
case Expr::BlockExprClass:
case Expr::NoStmtClass:
case Expr::OpaqueValueExprClass:
case Expr::PackExpansionExprClass:
case Expr::SubstNonTypeTemplateParmPackExprClass:
case Expr::FunctionParmPackExprClass:
case Expr::AsTypeExprClass:
case Expr::ObjCIndirectCopyRestoreExprClass:
case Expr::MaterializeTemporaryExprClass:
case Expr::PseudoObjectExprClass:
case Expr::AtomicExprClass:
case Expr::LambdaExprClass:
case Expr::CXXFoldExprClass:
return ICEDiag(IK_NotICE, E->getLocStart());
case Expr::InitListExprClass: {
// C++03 [dcl.init]p13: If T is a scalar type, then a declaration of the
// form "T x = { a };" is equivalent to "T x = a;".
// Unless we're initializing a reference, T is a scalar as it is known to be
// of integral or enumeration type.
if (E->isRValue() && (!Ctx.getLangOpts().HLSL || E->getType()->isScalarType() || IsHLSLVecInitList(E))) // HLSL Change
if (cast<InitListExpr>(E)->getNumInits() == 1)
return CheckICE(cast<InitListExpr>(E)->getInit(0), Ctx);
return ICEDiag(IK_NotICE, E->getLocStart());
}
case Expr::SizeOfPackExprClass:
case Expr::GNUNullExprClass:
// GCC considers the GNU __null value to be an integral constant expression.
return NoDiag();
case Expr::SubstNonTypeTemplateParmExprClass:
return
CheckICE(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(), Ctx);
case Expr::ParenExprClass:
return CheckICE(cast<ParenExpr>(E)->getSubExpr(), Ctx);
case Expr::GenericSelectionExprClass:
return CheckICE(cast<GenericSelectionExpr>(E)->getResultExpr(), Ctx);
case Expr::IntegerLiteralClass:
case Expr::CharacterLiteralClass:
case Expr::ObjCBoolLiteralExprClass:
case Expr::CXXBoolLiteralExprClass:
case Expr::CXXScalarValueInitExprClass:
case Expr::TypeTraitExprClass:
case Expr::ArrayTypeTraitExprClass:
case Expr::ExpressionTraitExprClass:
case Expr::CXXNoexceptExprClass:
return NoDiag();
case Expr::CallExprClass:
case Expr::CXXOperatorCallExprClass: {
// C99 6.6/3 allows function calls within unevaluated subexpressions of
// constant expressions, but they can never be ICEs because an ICE cannot
// contain an operand of (pointer to) function type.
const CallExpr *CE = cast<CallExpr>(E);
if (hlsl::IsIntrinsicOp(GetCallExprFunction(CE)) || CE->getBuiltinCallee()) // HLSL Change
return CheckEvalInICE(E, Ctx);
return ICEDiag(IK_NotICE, E->getLocStart());
}
case Expr::DeclRefExprClass: {
if (isa<EnumConstantDecl>(cast<DeclRefExpr>(E)->getDecl()))
return NoDiag();
const ValueDecl *D = dyn_cast<ValueDecl>(cast<DeclRefExpr>(E)->getDecl());
if (Ctx.getLangOpts().CPlusPlus &&
D && IsConstNonVolatile(D->getType())) {
// Parameter variables are never constants. Without this check,
// getAnyInitializer() can find a default argument, which leads
// to chaos.
if (isa<ParmVarDecl>(D))
return ICEDiag(IK_NotICE, cast<DeclRefExpr>(E)->getLocation());
// C++ 7.1.5.1p2
// A variable of non-volatile const-qualified integral or enumeration
// type initialized by an ICE can be used in ICEs.
if (const VarDecl *Dcl = dyn_cast<VarDecl>(D)) {
if (!Dcl->getType()->isIntegralOrEnumerationType())
return ICEDiag(IK_NotICE, cast<DeclRefExpr>(E)->getLocation());
// HLSL Change: cbuffer vars with init are not really constant in this way
if (Ctx.getLangOpts().HLSL &&
Dcl->hasGlobalStorage() &&
Dcl->getStorageClass() != SC_Static) {
return ICEDiag(IK_NotICE, cast<DeclRefExpr>(E)->getLocation());
}
const VarDecl *VD;
// Look for a declaration of this variable that has an initializer, and
// check whether it is an ICE.
if (Dcl->getAnyInitializer(VD) && VD->checkInitIsICE())
return NoDiag();
else
return ICEDiag(IK_NotICE, cast<DeclRefExpr>(E)->getLocation());
}
}
return ICEDiag(IK_NotICE, E->getLocStart());
}
case Expr::UnaryOperatorClass: {
const UnaryOperator *Exp = cast<UnaryOperator>(E);
switch (Exp->getOpcode()) {
case UO_PostInc:
case UO_PostDec:
case UO_PreInc:
case UO_PreDec:
case UO_AddrOf:
case UO_Deref:
// C99 6.6/3 allows increment and decrement within unevaluated
// subexpressions of constant expressions, but they can never be ICEs
// because an ICE cannot contain an lvalue operand.
return ICEDiag(IK_NotICE, E->getLocStart());
case UO_Extension:
case UO_LNot:
case UO_Plus:
case UO_Minus:
case UO_Not:
case UO_Real:
case UO_Imag:
return CheckICE(Exp->getSubExpr(), Ctx);
}
return CheckEvalInICE(E, Ctx); // HLSL Change - avoid dead-code fallthrough
}
case Expr::OffsetOfExprClass: {
// Note that per C99, offsetof must be an ICE. As far as we know, using
// EvaluateAsRValue matches the proposed gcc behavior for cases like
// "offsetof(struct s{int x[4];}, x[1.0])". This doesn't affect
// compliance: we should warn earlier for offsetof expressions with
// array subscripts that aren't ICEs, and if the array subscripts
// are ICEs, the value of the offsetof must be an integer constant.
return CheckEvalInICE(E, Ctx);
}
case Expr::UnaryExprOrTypeTraitExprClass: {
const UnaryExprOrTypeTraitExpr *Exp = cast<UnaryExprOrTypeTraitExpr>(E);
if ((Exp->getKind() == UETT_SizeOf) &&
Exp->getTypeOfArgument()->isVariableArrayType())
return ICEDiag(IK_NotICE, E->getLocStart());
return NoDiag();
}
case Expr::BinaryOperatorClass: {
const BinaryOperator *Exp = cast<BinaryOperator>(E);
switch (Exp->getOpcode()) {
case BO_PtrMemD:
case BO_PtrMemI:
case BO_Assign:
case BO_MulAssign:
case BO_DivAssign:
case BO_RemAssign:
case BO_AddAssign:
case BO_SubAssign:
case BO_ShlAssign:
case BO_ShrAssign:
case BO_AndAssign:
case BO_XorAssign:
case BO_OrAssign:
// C99 6.6/3 allows assignments within unevaluated subexpressions of
// constant expressions, but they can never be ICEs because an ICE cannot
// contain an lvalue operand.
return ICEDiag(IK_NotICE, E->getLocStart());
case BO_Mul:
case BO_Div:
case BO_Rem:
case BO_Add:
case BO_Sub:
case BO_Shl:
case BO_Shr:
case BO_LT:
case BO_GT:
case BO_LE:
case BO_GE:
case BO_EQ:
case BO_NE:
case BO_And:
case BO_Xor:
case BO_Or:
case BO_Comma: {
ICEDiag LHSResult = CheckICE(Exp->getLHS(), Ctx);
ICEDiag RHSResult = CheckICE(Exp->getRHS(), Ctx);
if (Exp->getOpcode() == BO_Div ||
Exp->getOpcode() == BO_Rem) {
// EvaluateAsRValue gives an error for undefined Div/Rem, so make sure
// we don't evaluate one.
if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICE) {
llvm::APSInt REval = Exp->getRHS()->EvaluateKnownConstInt(Ctx);
if (REval == 0)
return ICEDiag(IK_ICEIfUnevaluated, E->getLocStart());
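// Division by -1 is the other undefined-behavior hazard here: INT_MIN / -1
// (and the corresponding remainder) overflows, so such an expression is an
// ICE only if it is never evaluated.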
if (REval.isSigned() && REval.isAllOnesValue()) {
llvm::APSInt LEval = Exp->getLHS()->EvaluateKnownConstInt(Ctx);
if (LEval.isMinSignedValue())
return ICEDiag(IK_ICEIfUnevaluated, E->getLocStart());
}
}
}
if (Exp->getOpcode() == BO_Comma && !Ctx.getLangOpts().HLSL) { // HLSL Change: comma is allowed in ICE
if (Ctx.getLangOpts().C99) {
// C99 6.6p3 introduces a strange edge case: comma can be in an ICE
// if it isn't evaluated.
if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICE)
return ICEDiag(IK_ICEIfUnevaluated, E->getLocStart());
} else {
// In both C89 and C++, commas in ICEs are illegal.
return ICEDiag(IK_NotICE, E->getLocStart());
}
}
return Worst(LHSResult, RHSResult);
}
case BO_LAnd:
case BO_LOr: {
ICEDiag LHSResult = CheckICE(Exp->getLHS(), Ctx);
ICEDiag RHSResult = CheckICE(Exp->getRHS(), Ctx);
if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICEIfUnevaluated) {
// Rare case where the RHS has a comma "side-effect"; we need
// to actually check the condition to see whether the side
// with the comma is evaluated.
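// For &&, the RHS is evaluated only when the LHS is nonzero; for ||, only
// when the LHS is zero. The inequality below returns RHSResult exactly when
// the RHS would actually be reached.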
if ((Exp->getOpcode() == BO_LAnd) !=
(Exp->getLHS()->EvaluateKnownConstInt(Ctx) == 0))
return RHSResult;
return NoDiag();
}
return Worst(LHSResult, RHSResult);
}
}
llvm_unreachable("Invalid binary operator!");
}
case Expr::ImplicitCastExprClass:
case Expr::CStyleCastExprClass:
case Expr::CXXFunctionalCastExprClass:
case Expr::CXXStaticCastExprClass:
case Expr::CXXReinterpretCastExprClass:
case Expr::CXXConstCastExprClass:
case Expr::ObjCBridgedCastExprClass: {
const Expr *SubExpr = cast<CastExpr>(E)->getSubExpr();
if (isa<ExplicitCastExpr>(E)) {
if (const FloatingLiteral *FL
= dyn_cast<FloatingLiteral>(SubExpr->IgnoreParenImpCasts())) {
unsigned DestWidth = Ctx.getIntWidth(E->getType());
bool DestSigned = E->getType()->isSignedIntegerOrEnumerationType();
APSInt IgnoredVal(DestWidth, !DestSigned);
bool Ignored;
// If the value does not fit in the destination type, the behavior is
// undefined, so we are not required to treat it as a constant
// expression.
if (FL->getValue().convertToInteger(IgnoredVal,
llvm::APFloat::rmTowardZero,
&Ignored) & APFloat::opInvalidOp)
return ICEDiag(IK_NotICE, E->getLocStart());
return NoDiag();
}
}
const CastExpr *CE = cast<CastExpr>(E);
switch (CE->getCastKind()) {
case CK_LValueToRValue:
case CK_AtomicToNonAtomic:
case CK_NonAtomicToAtomic:
case CK_NoOp:
case CK_IntegralToBoolean:
case CK_IntegralCast:
return CheckICE(SubExpr, Ctx);
default:
return ICEDiag(IK_NotICE, E->getLocStart());
}
}
case Expr::BinaryConditionalOperatorClass: {
const BinaryConditionalOperator *Exp = cast<BinaryConditionalOperator>(E);
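// This is the GNU "x ?: y" extension: the common expression serves as both
// the condition and the true result, so only it and the false arm need
// checking.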
ICEDiag CommonResult = CheckICE(Exp->getCommon(), Ctx);
if (CommonResult.Kind == IK_NotICE) return CommonResult;
ICEDiag FalseResult = CheckICE(Exp->getFalseExpr(), Ctx);
if (FalseResult.Kind == IK_NotICE) return FalseResult;
if (CommonResult.Kind == IK_ICEIfUnevaluated) return CommonResult;
if (FalseResult.Kind == IK_ICEIfUnevaluated &&
Exp->getCommon()->EvaluateKnownConstInt(Ctx) != 0) return NoDiag();
return FalseResult;
}
case Expr::ConditionalOperatorClass: {
const ConditionalOperator *Exp = cast<ConditionalOperator>(E);
// If the condition (ignoring parens) is a __builtin_constant_p call,
// then only the true side is actually considered in an integer constant
// expression, and it is fully evaluated. This is an important GNU
// extension. See GCC PR38377 for discussion.
if (const CallExpr *CallCE
= dyn_cast<CallExpr>(Exp->getCond()->IgnoreParenCasts()))
if (CallCE->getBuiltinCallee() == Builtin::BI__builtin_constant_p)
return CheckEvalInICE(E, Ctx);
ICEDiag CondResult = CheckICE(Exp->getCond(), Ctx);
if (CondResult.Kind == IK_NotICE)
return CondResult;
ICEDiag TrueResult = CheckICE(Exp->getTrueExpr(), Ctx);
ICEDiag FalseResult = CheckICE(Exp->getFalseExpr(), Ctx);
if (TrueResult.Kind == IK_NotICE)
return TrueResult;
if (FalseResult.Kind == IK_NotICE)
return FalseResult;
if (CondResult.Kind == IK_ICEIfUnevaluated)
return CondResult;
if (TrueResult.Kind == IK_ICE && FalseResult.Kind == IK_ICE)
return NoDiag();
// Rare case where the diagnostics depend on which side is evaluated.
// Note that if we get here, CondResult is 0, and at least one of
// TrueResult and FalseResult is non-zero.
if (Exp->getCond()->EvaluateKnownConstInt(Ctx) == 0)
return FalseResult;
return TrueResult;
}
case Expr::CXXDefaultArgExprClass:
return CheckICE(cast<CXXDefaultArgExpr>(E)->getExpr(), Ctx);
case Expr::CXXDefaultInitExprClass:
return CheckICE(cast<CXXDefaultInitExpr>(E)->getExpr(), Ctx);
case Expr::ChooseExprClass: {
return CheckICE(cast<ChooseExpr>(E)->getChosenSubExpr(), Ctx);
}
}
llvm_unreachable("Invalid StmtClass!");
}
/// Evaluate an expression as a C++11 integral constant expression.
static bool EvaluateCPlusPlus11IntegralConstantExpr(const ASTContext &Ctx,
const Expr *E,
llvm::APSInt *Value,
SourceLocation *Loc) {
if (!E->getType()->isIntegralOrEnumerationType()) {
if (Loc) *Loc = E->getExprLoc();
return false;
}
APValue Result;
if (!E->isCXX11ConstantExpr(Ctx, &Result, Loc))
return false;
if (!Result.isInt()) {
if (Loc) *Loc = E->getExprLoc();
return false;
}
if (Value) *Value = Result.getInt();
return true;
}
bool Expr::isIntegerConstantExpr(const ASTContext &Ctx,
SourceLocation *Loc) const {
// HLSL Change - if templates are enabled, we need to act like C++11 here
if (Ctx.getLangOpts().CPlusPlus11 ||
Ctx.getLangOpts().HLSLVersion >= hlsl::LangStd::v2021)
return EvaluateCPlusPlus11IntegralConstantExpr(Ctx, this, nullptr, Loc);
ICEDiag D = CheckICE(this, Ctx);
if (D.Kind != IK_ICE) {
if (Loc) *Loc = D.Loc;
return false;
}
return true;
}
bool Expr::isIntegerConstantExpr(llvm::APSInt &Value, const ASTContext &Ctx,
SourceLocation *Loc, bool isEvaluated) const {
// HLSL Change - if templates are enabled, we need to act like C++11 here
if (Ctx.getLangOpts().CPlusPlus11 ||
Ctx.getLangOpts().HLSLVersion >= hlsl::LangStd::v2021)
return EvaluateCPlusPlus11IntegralConstantExpr(Ctx, this, &Value, Loc);
if (!isIntegerConstantExpr(Ctx, Loc))
return false;
if (!EvaluateAsInt(Value, Ctx))
llvm_unreachable("ICE cannot be evaluated!");
return true;
}
bool Expr::isCXX98IntegralConstantExpr(const ASTContext &Ctx) const {
return CheckICE(this, Ctx).Kind == IK_ICE;
}
bool Expr::isCXX11ConstantExpr(const ASTContext &Ctx, APValue *Result,
SourceLocation *Loc) const {
// We support this checking in C++98 mode in order to diagnose compatibility
// issues.
assert(Ctx.getLangOpts().CPlusPlus);
// Build evaluation settings.
Expr::EvalStatus Status;
SmallVector<PartialDiagnosticAt, 8> Diags;
Status.Diag = &Diags;
EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantExpression);
APValue Scratch;
bool IsConstExpr = ::EvaluateAsRValue(Info, this, Result ? *Result : Scratch);
if (!Diags.empty()) {
IsConstExpr = false;
if (Loc) *Loc = Diags[0].first;
} else if (!IsConstExpr) {
// FIXME: This shouldn't happen.
if (Loc) *Loc = getExprLoc();
}
return IsConstExpr;
}
bool Expr::EvaluateWithSubstitution(APValue &Value, ASTContext &Ctx,
const FunctionDecl *Callee,
ArrayRef<const Expr*> Args) const {
Expr::EvalStatus Status;
EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantExpressionUnevaluated);
ArgVector ArgValues(Args.size());
for (ArrayRef<const Expr*>::iterator I = Args.begin(), E = Args.end();
I != E; ++I) {
if ((*I)->isValueDependent() ||
!Evaluate(ArgValues[I - Args.begin()], Info, *I))
// If evaluation fails, throw away the argument entirely.
ArgValues[I - Args.begin()] = APValue();
if (Info.EvalStatus.HasSideEffects)
return false;
}
// Build fake call to Callee.
CallStackFrame Frame(Info, Callee->getLocation(), Callee, /*This*/nullptr,
ArgValues.data());
return Evaluate(Value, Info, this) && !Info.EvalStatus.HasSideEffects;
}
bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
SmallVectorImpl<
PartialDiagnosticAt> &Diags) {
// FIXME: It would be useful to check constexpr function templates, but at the
// moment the constant expression evaluator cannot cope with the non-rigorous
// ASTs which we build for dependent expressions.
if (FD->isDependentContext())
return true;
Expr::EvalStatus Status;
Status.Diag = &Diags;
EvalInfo Info(FD->getASTContext(), Status,
EvalInfo::EM_PotentialConstantExpression);
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
const CXXRecordDecl *RD = MD ? MD->getParent()->getCanonicalDecl() : nullptr;
// Fabricate an arbitrary expression on the stack and pretend that it
// is a temporary being used as the 'this' pointer.
LValue This;
ImplicitValueInitExpr VIE(RD ? Info.Ctx.getRecordType(RD) : Info.Ctx.IntTy);
This.set(&VIE, Info.CurrentCall->Index);
ArrayRef<const Expr*> Args;
SourceLocation Loc = FD->getLocation();
APValue Scratch;
if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) {
// Evaluate the call as a constant initializer, to allow the construction
// of objects of non-literal types.
Info.setEvaluatingDecl(This.getLValueBase(), Scratch);
HandleConstructorCall(Loc, This, Args, CD, Info, Scratch);
} else
HandleFunctionCall(Loc, FD, (MD && MD->isInstance()) ? &This : nullptr,
Args, FD->getBody(), Info, Scratch);
return Diags.empty();
}
bool Expr::isPotentialConstantExprUnevaluated(Expr *E,
const FunctionDecl *FD,
SmallVectorImpl<
PartialDiagnosticAt> &Diags) {
Expr::EvalStatus Status;
Status.Diag = &Diags;
EvalInfo Info(FD->getASTContext(), Status,
EvalInfo::EM_PotentialConstantExpressionUnevaluated);
// Fabricate a call stack frame to give the arguments a plausible cover story.
ArrayRef<const Expr*> Args;
ArgVector ArgValues(0);
bool Success = EvaluateArgs(Args, ArgValues, Info);
(void)Success;
assert(Success &&
"Failed to set up arguments for potential constant evaluation");
CallStackFrame Frame(Info, SourceLocation(), FD, nullptr, ArgValues.data());
APValue ResultScratch;
Evaluate(ResultScratch, Info, E);
return Diags.empty();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/TemplateName.cpp | //===--- TemplateName.cpp - C++ Template Name Representation---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the TemplateName interface and subclasses.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/TemplateName.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/TemplateBase.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/LangOptions.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace llvm;
TemplateArgument
SubstTemplateTemplateParmPackStorage::getArgumentPack() const {
return TemplateArgument(Arguments, size());
}
void SubstTemplateTemplateParmStorage::Profile(llvm::FoldingSetNodeID &ID) {
Profile(ID, Parameter, Replacement);
}
void SubstTemplateTemplateParmStorage::Profile(llvm::FoldingSetNodeID &ID,
TemplateTemplateParmDecl *parameter,
TemplateName replacement) {
ID.AddPointer(parameter);
ID.AddPointer(replacement.getAsVoidPointer());
}
void SubstTemplateTemplateParmPackStorage::Profile(llvm::FoldingSetNodeID &ID,
ASTContext &Context) {
Profile(ID, Context, Parameter, TemplateArgument(Arguments, size()));
}
void SubstTemplateTemplateParmPackStorage::Profile(llvm::FoldingSetNodeID &ID,
ASTContext &Context,
TemplateTemplateParmDecl *Parameter,
const TemplateArgument &ArgPack) {
ID.AddPointer(Parameter);
ArgPack.Profile(ID, Context);
}
TemplateName::NameKind TemplateName::getKind() const {
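// The first three kinds each occupy a dedicated slot in the pointer union;
// the remaining kinds share the UncommonTemplateNameStorage slot and are
// distinguished by querying that storage object.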
if (Storage.is<TemplateDecl *>())
return Template;
if (Storage.is<DependentTemplateName *>())
return DependentTemplate;
if (Storage.is<QualifiedTemplateName *>())
return QualifiedTemplate;
UncommonTemplateNameStorage *uncommon
= Storage.get<UncommonTemplateNameStorage*>();
if (uncommon->getAsOverloadedStorage())
return OverloadedTemplate;
if (uncommon->getAsSubstTemplateTemplateParm())
return SubstTemplateTemplateParm;
return SubstTemplateTemplateParmPack;
}
TemplateDecl *TemplateName::getAsTemplateDecl() const {
if (TemplateDecl *Template = Storage.dyn_cast<TemplateDecl *>())
return Template;
if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName())
return QTN->getTemplateDecl();
if (SubstTemplateTemplateParmStorage *sub = getAsSubstTemplateTemplateParm())
return sub->getReplacement().getAsTemplateDecl();
return nullptr;
}
bool TemplateName::isDependent() const {
if (TemplateDecl *Template = getAsTemplateDecl()) {
if (isa<TemplateTemplateParmDecl>(Template))
return true;
// FIXME: Hack, getDeclContext() can be null if Template is still
// initializing due to PCH reading, so we check it before using it.
// Should probably modify TemplateSpecializationType to allow constructing
// it without the isDependent() checking.
return Template->getDeclContext() &&
Template->getDeclContext()->isDependentContext();
}
assert(!getAsOverloadedTemplate() &&
"overloaded templates shouldn't survive to here");
return true;
}
bool TemplateName::isInstantiationDependent() const {
if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) {
if (QTN->getQualifier()->isInstantiationDependent())
return true;
}
return isDependent();
}
bool TemplateName::containsUnexpandedParameterPack() const {
if (TemplateDecl *Template = getAsTemplateDecl()) {
if (TemplateTemplateParmDecl *TTP
= dyn_cast<TemplateTemplateParmDecl>(Template))
return TTP->isParameterPack();
return false;
}
if (DependentTemplateName *DTN = getAsDependentTemplateName())
return DTN->getQualifier() &&
DTN->getQualifier()->containsUnexpandedParameterPack();
return getAsSubstTemplateTemplateParmPack() != nullptr;
}
void
TemplateName::print(raw_ostream &OS, const PrintingPolicy &Policy,
bool SuppressNNS) const {
if (TemplateDecl *Template = Storage.dyn_cast<TemplateDecl *>())
OS << *Template;
else if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) {
if (!SuppressNNS)
QTN->getQualifier()->print(OS, Policy);
if (QTN->hasTemplateKeyword())
OS << "template ";
OS << *QTN->getDecl();
} else if (DependentTemplateName *DTN = getAsDependentTemplateName()) {
if (!SuppressNNS && DTN->getQualifier())
DTN->getQualifier()->print(OS, Policy);
OS << "template ";
if (DTN->isIdentifier())
OS << DTN->getIdentifier()->getName();
else
OS << "operator " << getOperatorSpelling(DTN->getOperator());
} else if (SubstTemplateTemplateParmStorage *subst
= getAsSubstTemplateTemplateParm()) {
subst->getReplacement().print(OS, Policy, SuppressNNS);
} else if (SubstTemplateTemplateParmPackStorage *SubstPack
= getAsSubstTemplateTemplateParmPack())
OS << *SubstPack->getParameterPack();
else {
OverloadedTemplateStorage *OTS = getAsOverloadedTemplate();
(*OTS->begin())->printName(OS);
}
}
const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
TemplateName N) {
std::string NameStr;
raw_string_ostream OS(NameStr);
LangOptions LO;
#ifdef MS_SUPPORT_VARIABLE_LANGOPTS
LO.CPlusPlus = true;
LO.Bool = true;
#endif
OS << '\'';
N.print(OS, PrintingPolicy(LO));
OS << '\'';
OS.flush();
return DB << NameStr;
}
void TemplateName::dump(raw_ostream &OS) const {
LangOptions LO; // FIXME!
#ifdef MS_SUPPORT_VARIABLE_LANGOPTS
LO.CPlusPlus = true;
LO.Bool = true;
#endif
print(OS, PrintingPolicy(LO));
}
void TemplateName::dump() const {
dump(llvm::errs());
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/CommentSema.cpp | //===--- CommentSema.cpp - Doxygen comment semantic analysis --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/CommentSema.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CommentCommandTraits.h"
#include "clang/AST/CommentDiagnostic.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
namespace clang {
namespace comments {
namespace {
#include "clang/AST/CommentHTMLTagsProperties.inc"
} // unnamed namespace
Sema::Sema(llvm::BumpPtrAllocator &Allocator, const SourceManager &SourceMgr,
DiagnosticsEngine &Diags, CommandTraits &Traits,
const Preprocessor *PP) :
Allocator(Allocator), SourceMgr(SourceMgr), Diags(Diags), Traits(Traits),
PP(PP), ThisDeclInfo(nullptr), BriefCommand(nullptr),
HeaderfileCommand(nullptr) {
}
void Sema::setDecl(const Decl *D) {
if (!D)
return;
ThisDeclInfo = new (Allocator) DeclInfo;
ThisDeclInfo->CommentDecl = D;
ThisDeclInfo->IsFilled = false;
}
ParagraphComment *Sema::actOnParagraphComment(
ArrayRef<InlineContentComment *> Content) {
return new (Allocator) ParagraphComment(Content);
}
BlockCommandComment *Sema::actOnBlockCommandStart(
SourceLocation LocBegin,
SourceLocation LocEnd,
unsigned CommandID,
CommandMarkerKind CommandMarker) {
BlockCommandComment *BC = new (Allocator) BlockCommandComment(LocBegin, LocEnd,
CommandID,
CommandMarker);
checkContainerDecl(BC);
return BC;
}
void Sema::actOnBlockCommandArgs(BlockCommandComment *Command,
ArrayRef<BlockCommandComment::Argument> Args) {
Command->setArgs(Args);
}
void Sema::actOnBlockCommandFinish(BlockCommandComment *Command,
ParagraphComment *Paragraph) {
Command->setParagraph(Paragraph);
checkBlockCommandEmptyParagraph(Command);
checkBlockCommandDuplicate(Command);
if (ThisDeclInfo) {
// These checks only make sense if the comment is attached to a
// declaration.
checkReturnsCommand(Command);
checkDeprecatedCommand(Command);
}
}
ParamCommandComment *Sema::actOnParamCommandStart(
SourceLocation LocBegin,
SourceLocation LocEnd,
unsigned CommandID,
CommandMarkerKind CommandMarker) {
ParamCommandComment *Command =
new (Allocator) ParamCommandComment(LocBegin, LocEnd, CommandID,
CommandMarker);
if (!isFunctionDecl())
Diag(Command->getLocation(),
diag::warn_doc_param_not_attached_to_a_function_decl)
<< CommandMarker
<< Command->getCommandNameRange(Traits);
return Command;
}
void Sema::checkFunctionDeclVerbatimLine(const BlockCommandComment *Comment) {
const CommandInfo *Info = Traits.getCommandInfo(Comment->getCommandID());
if (!Info->IsFunctionDeclarationCommand)
return;
unsigned DiagSelect;
switch (Comment->getCommandID()) {
case CommandTraits::KCI_function:
DiagSelect = (!isAnyFunctionDecl() && !isFunctionTemplateDecl())? 1 : 0;
break;
case CommandTraits::KCI_functiongroup:
DiagSelect = (!isAnyFunctionDecl() && !isFunctionTemplateDecl())? 2 : 0;
break;
case CommandTraits::KCI_method:
DiagSelect = !isObjCMethodDecl() ? 3 : 0;
break;
case CommandTraits::KCI_methodgroup:
DiagSelect = !isObjCMethodDecl() ? 4 : 0;
break;
case CommandTraits::KCI_callback:
DiagSelect = !isFunctionPointerVarDecl() ? 5 : 0;
break;
default:
DiagSelect = 0;
break;
}
if (DiagSelect)
Diag(Comment->getLocation(), diag::warn_doc_function_method_decl_mismatch)
<< Comment->getCommandMarker()
<< (DiagSelect-1) << (DiagSelect-1)
<< Comment->getSourceRange();
}
void Sema::checkContainerDeclVerbatimLine(const BlockCommandComment *Comment) {
const CommandInfo *Info = Traits.getCommandInfo(Comment->getCommandID());
if (!Info->IsRecordLikeDeclarationCommand)
return;
unsigned DiagSelect;
switch (Comment->getCommandID()) {
case CommandTraits::KCI_class:
DiagSelect = (!isClassOrStructDecl() && !isClassTemplateDecl()) ? 1 : 0;
// Allow @class command on @interface declarations.
// FIXME. Currently, \class and @class are indistinguishable. So,
// \class is also allowed on an @interface declaration
if (DiagSelect && Comment->getCommandMarker() && isObjCInterfaceDecl())
DiagSelect = 0;
break;
case CommandTraits::KCI_interface:
DiagSelect = !isObjCInterfaceDecl() ? 2 : 0;
break;
case CommandTraits::KCI_protocol:
DiagSelect = !isObjCProtocolDecl() ? 3 : 0;
break;
case CommandTraits::KCI_struct:
DiagSelect = !isClassOrStructDecl() ? 4 : 0;
break;
case CommandTraits::KCI_union:
DiagSelect = !isUnionDecl() ? 5 : 0;
break;
default:
DiagSelect = 0;
break;
}
if (DiagSelect)
Diag(Comment->getLocation(), diag::warn_doc_api_container_decl_mismatch)
<< Comment->getCommandMarker()
<< (DiagSelect-1) << (DiagSelect-1)
<< Comment->getSourceRange();
}
void Sema::checkContainerDecl(const BlockCommandComment *Comment) {
const CommandInfo *Info = Traits.getCommandInfo(Comment->getCommandID());
if (!Info->IsRecordLikeDetailCommand || isRecordLikeDecl())
return;
unsigned DiagSelect;
switch (Comment->getCommandID()) {
case CommandTraits::KCI_classdesign:
DiagSelect = 1;
break;
case CommandTraits::KCI_coclass:
DiagSelect = 2;
break;
case CommandTraits::KCI_dependency:
DiagSelect = 3;
break;
case CommandTraits::KCI_helper:
DiagSelect = 4;
break;
case CommandTraits::KCI_helperclass:
DiagSelect = 5;
break;
case CommandTraits::KCI_helps:
DiagSelect = 6;
break;
case CommandTraits::KCI_instancesize:
DiagSelect = 7;
break;
case CommandTraits::KCI_ownership:
DiagSelect = 8;
break;
case CommandTraits::KCI_performance:
DiagSelect = 9;
break;
case CommandTraits::KCI_security:
DiagSelect = 10;
break;
case CommandTraits::KCI_superclass:
DiagSelect = 11;
break;
default:
DiagSelect = 0;
break;
}
if (DiagSelect)
Diag(Comment->getLocation(), diag::warn_doc_container_decl_mismatch)
<< Comment->getCommandMarker()
<< (DiagSelect-1)
<< Comment->getSourceRange();
}
/// \brief Turn a string into the corresponding PassDirection or -1 if it's not
/// valid.
static int getParamPassDirection(StringRef Arg) {
return llvm::StringSwitch<int>(Arg)
.Case("[in]", ParamCommandComment::In)
.Case("[out]", ParamCommandComment::Out)
.Cases("[in,out]", "[out,in]", ParamCommandComment::InOut)
.Default(-1);
}
void Sema::actOnParamCommandDirectionArg(ParamCommandComment *Command,
SourceLocation ArgLocBegin,
SourceLocation ArgLocEnd,
StringRef Arg) {
std::string ArgLower = Arg.lower();
int Direction = getParamPassDirection(ArgLower);
if (Direction == -1) {
// Try again with whitespace removed.
ArgLower.erase(
std::remove_if(ArgLower.begin(), ArgLower.end(), clang::isWhitespace),
ArgLower.end());
Direction = getParamPassDirection(ArgLower);
SourceRange ArgRange(ArgLocBegin, ArgLocEnd);
if (Direction != -1) {
const char *FixedName = ParamCommandComment::getDirectionAsString(
(ParamCommandComment::PassDirection)Direction);
Diag(ArgLocBegin, diag::warn_doc_param_spaces_in_direction)
<< ArgRange << FixItHint::CreateReplacement(ArgRange, FixedName);
} else {
Diag(ArgLocBegin, diag::warn_doc_param_invalid_direction) << ArgRange;
Direction = ParamCommandComment::In; // Sane fallback.
}
}
Command->setDirection((ParamCommandComment::PassDirection)Direction,
/*Explicit=*/true);
}
void Sema::actOnParamCommandParamNameArg(ParamCommandComment *Command,
SourceLocation ArgLocBegin,
SourceLocation ArgLocEnd,
StringRef Arg) {
// Parser will not feed us more arguments than needed.
assert(Command->getNumArgs() == 0);
if (!Command->isDirectionExplicit()) {
// User didn't provide a direction argument.
Command->setDirection(ParamCommandComment::In, /* Explicit = */ false);
}
typedef BlockCommandComment::Argument Argument;
Argument *A = new (Allocator) Argument(SourceRange(ArgLocBegin,
ArgLocEnd),
Arg);
Command->setArgs(llvm::makeArrayRef(A, 1));
}
void Sema::actOnParamCommandFinish(ParamCommandComment *Command,
ParagraphComment *Paragraph) {
Command->setParagraph(Paragraph);
checkBlockCommandEmptyParagraph(Command);
}
TParamCommandComment *Sema::actOnTParamCommandStart(
SourceLocation LocBegin,
SourceLocation LocEnd,
unsigned CommandID,
CommandMarkerKind CommandMarker) {
TParamCommandComment *Command =
new (Allocator) TParamCommandComment(LocBegin, LocEnd, CommandID,
CommandMarker);
if (!isTemplateOrSpecialization())
Diag(Command->getLocation(),
diag::warn_doc_tparam_not_attached_to_a_template_decl)
<< CommandMarker
<< Command->getCommandNameRange(Traits);
return Command;
}
void Sema::actOnTParamCommandParamNameArg(TParamCommandComment *Command,
SourceLocation ArgLocBegin,
SourceLocation ArgLocEnd,
StringRef Arg) {
// Parser will not feed us more arguments than needed.
assert(Command->getNumArgs() == 0);
typedef BlockCommandComment::Argument Argument;
Argument *A = new (Allocator) Argument(SourceRange(ArgLocBegin,
ArgLocEnd),
Arg);
Command->setArgs(llvm::makeArrayRef(A, 1));
if (!isTemplateOrSpecialization()) {
// We already warned that this \\tparam is not attached to a template decl.
return;
}
const TemplateParameterList *TemplateParameters =
ThisDeclInfo->TemplateParameters;
SmallVector<unsigned, 2> Position;
if (resolveTParamReference(Arg, TemplateParameters, &Position)) {
Command->setPosition(copyArray(llvm::makeArrayRef(Position)));
TParamCommandComment *&PrevCommand = TemplateParameterDocs[Arg];
if (PrevCommand) {
SourceRange ArgRange(ArgLocBegin, ArgLocEnd);
Diag(ArgLocBegin, diag::warn_doc_tparam_duplicate)
<< Arg << ArgRange;
Diag(PrevCommand->getLocation(), diag::note_doc_tparam_previous)
<< PrevCommand->getParamNameRange();
}
PrevCommand = Command;
return;
}
SourceRange ArgRange(ArgLocBegin, ArgLocEnd);
Diag(ArgLocBegin, diag::warn_doc_tparam_not_found)
<< Arg << ArgRange;
if (!TemplateParameters || TemplateParameters->size() == 0)
return;
StringRef CorrectedName;
if (TemplateParameters->size() == 1) {
const NamedDecl *Param = TemplateParameters->getParam(0);
const IdentifierInfo *II = Param->getIdentifier();
if (II)
CorrectedName = II->getName();
} else {
CorrectedName = correctTypoInTParamReference(Arg, TemplateParameters);
}
if (!CorrectedName.empty()) {
Diag(ArgLocBegin, diag::note_doc_tparam_name_suggestion)
<< CorrectedName
<< FixItHint::CreateReplacement(ArgRange, CorrectedName);
}
return;
}
void Sema::actOnTParamCommandFinish(TParamCommandComment *Command,
ParagraphComment *Paragraph) {
Command->setParagraph(Paragraph);
checkBlockCommandEmptyParagraph(Command);
}
InlineCommandComment *Sema::actOnInlineCommand(SourceLocation CommandLocBegin,
SourceLocation CommandLocEnd,
unsigned CommandID) {
ArrayRef<InlineCommandComment::Argument> Args;
StringRef CommandName = Traits.getCommandInfo(CommandID)->Name;
return new (Allocator) InlineCommandComment(
CommandLocBegin,
CommandLocEnd,
CommandID,
getInlineCommandRenderKind(CommandName),
Args);
}
InlineCommandComment *Sema::actOnInlineCommand(SourceLocation CommandLocBegin,
SourceLocation CommandLocEnd,
unsigned CommandID,
SourceLocation ArgLocBegin,
SourceLocation ArgLocEnd,
StringRef Arg) {
typedef InlineCommandComment::Argument Argument;
Argument *A = new (Allocator) Argument(SourceRange(ArgLocBegin,
ArgLocEnd),
Arg);
StringRef CommandName = Traits.getCommandInfo(CommandID)->Name;
return new (Allocator) InlineCommandComment(
CommandLocBegin,
CommandLocEnd,
CommandID,
getInlineCommandRenderKind(CommandName),
llvm::makeArrayRef(A, 1));
}
InlineContentComment *Sema::actOnUnknownCommand(SourceLocation LocBegin,
SourceLocation LocEnd,
StringRef CommandName) {
unsigned CommandID = Traits.registerUnknownCommand(CommandName)->getID();
return actOnUnknownCommand(LocBegin, LocEnd, CommandID);
}
InlineContentComment *Sema::actOnUnknownCommand(SourceLocation LocBegin,
SourceLocation LocEnd,
unsigned CommandID) {
ArrayRef<InlineCommandComment::Argument> Args;
return new (Allocator) InlineCommandComment(
LocBegin, LocEnd, CommandID,
InlineCommandComment::RenderNormal,
Args);
}
TextComment *Sema::actOnText(SourceLocation LocBegin,
SourceLocation LocEnd,
StringRef Text) {
return new (Allocator) TextComment(LocBegin, LocEnd, Text);
}
VerbatimBlockComment *Sema::actOnVerbatimBlockStart(SourceLocation Loc,
unsigned CommandID) {
StringRef CommandName = Traits.getCommandInfo(CommandID)->Name;
return new (Allocator) VerbatimBlockComment(
Loc,
Loc.getLocWithOffset(1 + CommandName.size()),
CommandID);
}
VerbatimBlockLineComment *Sema::actOnVerbatimBlockLine(SourceLocation Loc,
StringRef Text) {
return new (Allocator) VerbatimBlockLineComment(Loc, Text);
}
void Sema::actOnVerbatimBlockFinish(
VerbatimBlockComment *Block,
SourceLocation CloseNameLocBegin,
StringRef CloseName,
ArrayRef<VerbatimBlockLineComment *> Lines) {
Block->setCloseName(CloseName, CloseNameLocBegin);
Block->setLines(Lines);
}
VerbatimLineComment *Sema::actOnVerbatimLine(SourceLocation LocBegin,
unsigned CommandID,
SourceLocation TextBegin,
StringRef Text) {
VerbatimLineComment *VL = new (Allocator) VerbatimLineComment(
LocBegin,
TextBegin.getLocWithOffset(Text.size()),
CommandID,
TextBegin,
Text);
checkFunctionDeclVerbatimLine(VL);
checkContainerDeclVerbatimLine(VL);
return VL;
}
HTMLStartTagComment *Sema::actOnHTMLStartTagStart(SourceLocation LocBegin,
StringRef TagName) {
return new (Allocator) HTMLStartTagComment(LocBegin, TagName);
}
void Sema::actOnHTMLStartTagFinish(
HTMLStartTagComment *Tag,
ArrayRef<HTMLStartTagComment::Attribute> Attrs,
SourceLocation GreaterLoc,
bool IsSelfClosing) {
Tag->setAttrs(Attrs);
Tag->setGreaterLoc(GreaterLoc);
if (IsSelfClosing)
Tag->setSelfClosing();
else if (!isHTMLEndTagForbidden(Tag->getTagName()))
HTMLOpenTags.push_back(Tag);
}
HTMLEndTagComment *Sema::actOnHTMLEndTag(SourceLocation LocBegin,
SourceLocation LocEnd,
StringRef TagName) {
HTMLEndTagComment *HET =
new (Allocator) HTMLEndTagComment(LocBegin, LocEnd, TagName);
if (isHTMLEndTagForbidden(TagName)) {
Diag(HET->getLocation(), diag::warn_doc_html_end_forbidden)
<< TagName << HET->getSourceRange();
HET->setIsMalformed();
return HET;
}
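// Scan the stack of currently open tags, innermost first, for a matching
// start tag; if none exists, this end tag is unbalanced.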
bool FoundOpen = false;
for (SmallVectorImpl<HTMLStartTagComment *>::const_reverse_iterator
I = HTMLOpenTags.rbegin(), E = HTMLOpenTags.rend();
I != E; ++I) {
if ((*I)->getTagName() == TagName) {
FoundOpen = true;
break;
}
}
if (!FoundOpen) {
Diag(HET->getLocation(), diag::warn_doc_html_end_unbalanced)
<< HET->getSourceRange();
HET->setIsMalformed();
return HET;
}
while (!HTMLOpenTags.empty()) {
HTMLStartTagComment *HST = HTMLOpenTags.pop_back_val();
StringRef LastNotClosedTagName = HST->getTagName();
if (LastNotClosedTagName == TagName) {
// If the start tag is malformed, end tag is malformed as well.
if (HST->isMalformed())
HET->setIsMalformed();
break;
}
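// Some HTML tags (e.g. <p>, <li>) have optional end tags; pop them
// silently without diagnosing a mismatch.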
if (isHTMLEndTagOptional(LastNotClosedTagName))
continue;
bool OpenLineInvalid;
const unsigned OpenLine = SourceMgr.getPresumedLineNumber(
HST->getLocation(),
&OpenLineInvalid);
bool CloseLineInvalid;
const unsigned CloseLine = SourceMgr.getPresumedLineNumber(
HET->getLocation(),
&CloseLineInvalid);
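// If the mismatched tags are on the same line (or line information is
// unavailable), one combined diagnostic suffices; otherwise point at the
// start and end tags separately so both locations are shown.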
if (OpenLineInvalid || CloseLineInvalid || OpenLine == CloseLine) {
Diag(HST->getLocation(), diag::warn_doc_html_start_end_mismatch)
<< HST->getTagName() << HET->getTagName()
<< HST->getSourceRange() << HET->getSourceRange();
HST->setIsMalformed();
} else {
Diag(HST->getLocation(), diag::warn_doc_html_start_end_mismatch)
<< HST->getTagName() << HET->getTagName()
<< HST->getSourceRange();
Diag(HET->getLocation(), diag::note_doc_html_end_tag)
<< HET->getSourceRange();
HST->setIsMalformed();
}
}
return HET;
}
FullComment *Sema::actOnFullComment(
ArrayRef<BlockContentComment *> Blocks) {
FullComment *FC = new (Allocator) FullComment(Blocks, ThisDeclInfo);
resolveParamCommandIndexes(FC);
// Complain about HTML tags that are not closed.
while (!HTMLOpenTags.empty()) {
HTMLStartTagComment *HST = HTMLOpenTags.pop_back_val();
if (isHTMLEndTagOptional(HST->getTagName()))
continue;
Diag(HST->getLocation(), diag::warn_doc_html_missing_end_tag)
<< HST->getTagName() << HST->getSourceRange();
HST->setIsMalformed();
}
return FC;
}
void Sema::checkBlockCommandEmptyParagraph(BlockCommandComment *Command) {
if (Traits.getCommandInfo(Command->getCommandID())->IsEmptyParagraphAllowed)
return;
ParagraphComment *Paragraph = Command->getParagraph();
if (Paragraph->isWhitespace()) {
SourceLocation DiagLoc;
if (Command->getNumArgs() > 0)
DiagLoc = Command->getArgRange(Command->getNumArgs() - 1).getEnd();
if (!DiagLoc.isValid())
DiagLoc = Command->getCommandNameRange(Traits).getEnd();
Diag(DiagLoc, diag::warn_doc_block_command_empty_paragraph)
<< Command->getCommandMarker()
<< Command->getCommandName(Traits)
<< Command->getSourceRange();
}
}
void Sema::checkReturnsCommand(const BlockCommandComment *Command) {
if (!Traits.getCommandInfo(Command->getCommandID())->IsReturnsCommand)
return;
assert(ThisDeclInfo && "should not call this check on a bare comment");
if (isFunctionDecl()) {
if (ThisDeclInfo->ReturnType->isVoidType()) {
unsigned DiagKind;
switch (ThisDeclInfo->CommentDecl->getKind()) {
default:
if (ThisDeclInfo->IsObjCMethod)
DiagKind = 3;
else
DiagKind = 0;
break;
case Decl::CXXConstructor:
DiagKind = 1;
break;
case Decl::CXXDestructor:
DiagKind = 2;
break;
}
Diag(Command->getLocation(),
diag::warn_doc_returns_attached_to_a_void_function)
<< Command->getCommandMarker()
<< Command->getCommandName(Traits)
<< DiagKind
<< Command->getSourceRange();
}
return;
}
else if (isObjCPropertyDecl())
return;
Diag(Command->getLocation(),
diag::warn_doc_returns_not_attached_to_a_function_decl)
<< Command->getCommandMarker()
<< Command->getCommandName(Traits)
<< Command->getSourceRange();
}
void Sema::checkBlockCommandDuplicate(const BlockCommandComment *Command) {
const CommandInfo *Info = Traits.getCommandInfo(Command->getCommandID());
const BlockCommandComment *PrevCommand = nullptr;
if (Info->IsBriefCommand) {
if (!BriefCommand) {
BriefCommand = Command;
return;
}
PrevCommand = BriefCommand;
} else if (Info->IsHeaderfileCommand) {
if (!HeaderfileCommand) {
HeaderfileCommand = Command;
return;
}
PrevCommand = HeaderfileCommand;
} else {
// We don't want to check this command for duplicates.
return;
}
StringRef CommandName = Command->getCommandName(Traits);
StringRef PrevCommandName = PrevCommand->getCommandName(Traits);
Diag(Command->getLocation(), diag::warn_doc_block_command_duplicate)
<< Command->getCommandMarker()
<< CommandName
<< Command->getSourceRange();
if (CommandName == PrevCommandName)
Diag(PrevCommand->getLocation(), diag::note_doc_block_command_previous)
<< PrevCommand->getCommandMarker()
<< PrevCommandName
<< PrevCommand->getSourceRange();
else
Diag(PrevCommand->getLocation(),
diag::note_doc_block_command_previous_alias)
<< PrevCommand->getCommandMarker()
<< PrevCommandName
<< CommandName;
}
void Sema::checkDeprecatedCommand(const BlockCommandComment *Command) {
if (!Traits.getCommandInfo(Command->getCommandID())->IsDeprecatedCommand)
return;
assert(ThisDeclInfo && "should not call this check on a bare comment");
const Decl *D = ThisDeclInfo->CommentDecl;
if (!D)
return;
if (D->hasAttr<DeprecatedAttr>() ||
D->hasAttr<AvailabilityAttr>() ||
D->hasAttr<UnavailableAttr>())
return;
Diag(Command->getLocation(),
diag::warn_doc_deprecated_not_sync)
<< Command->getSourceRange();
// Try to emit a fixit with a deprecation attribute.
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
// Don't emit a Fix-It for non-member function definitions. GCC does not
// accept attributes on them.
const DeclContext *Ctx = FD->getDeclContext();
if ((!Ctx || !Ctx->isRecord()) &&
FD->doesThisDeclarationHaveABody())
return;
StringRef AttributeSpelling = "__attribute__((deprecated))";
if (PP) {
TokenValue Tokens[] = {
tok::kw___attribute, tok::l_paren, tok::l_paren,
PP->getIdentifierInfo("deprecated"),
tok::r_paren, tok::r_paren
};
StringRef MacroName = PP->getLastMacroWithSpelling(FD->getLocation(),
Tokens);
if (!MacroName.empty())
AttributeSpelling = MacroName;
}
SmallString<64> TextToInsert(" ");
TextToInsert += AttributeSpelling;
Diag(FD->getLocEnd(),
diag::note_add_deprecation_attr)
<< FixItHint::CreateInsertion(FD->getLocEnd().getLocWithOffset(1),
TextToInsert);
}
}
void Sema::resolveParamCommandIndexes(const FullComment *FC) {
if (!isFunctionDecl()) {
// We already warned that \\param commands are not attached to a function
// decl.
return;
}
SmallVector<ParamCommandComment *, 8> UnresolvedParamCommands;
// Comment AST nodes that correspond to \c ParamVars for which we have
// found a \\param command or NULL if no documentation was found so far.
SmallVector<ParamCommandComment *, 8> ParamVarDocs;
ArrayRef<const ParmVarDecl *> ParamVars = getParamVars();
ParamVarDocs.resize(ParamVars.size(), nullptr);
// First pass over all \\param commands: resolve all parameter names.
for (Comment::child_iterator I = FC->child_begin(), E = FC->child_end();
I != E; ++I) {
ParamCommandComment *PCC = dyn_cast<ParamCommandComment>(*I);
if (!PCC || !PCC->hasParamName())
continue;
StringRef ParamName = PCC->getParamNameAsWritten();
// Check that referenced parameter name is in the function decl.
const unsigned ResolvedParamIndex = resolveParmVarReference(ParamName,
ParamVars);
if (ResolvedParamIndex == ParamCommandComment::VarArgParamIndex) {
PCC->setIsVarArgParam();
continue;
}
if (ResolvedParamIndex == ParamCommandComment::InvalidParamIndex) {
UnresolvedParamCommands.push_back(PCC);
continue;
}
PCC->setParamIndex(ResolvedParamIndex);
if (ParamVarDocs[ResolvedParamIndex]) {
SourceRange ArgRange = PCC->getParamNameRange();
Diag(ArgRange.getBegin(), diag::warn_doc_param_duplicate)
<< ParamName << ArgRange;
ParamCommandComment *PrevCommand = ParamVarDocs[ResolvedParamIndex];
Diag(PrevCommand->getLocation(), diag::note_doc_param_previous)
<< PrevCommand->getParamNameRange();
}
ParamVarDocs[ResolvedParamIndex] = PCC;
}
// Find parameter declarations that have no corresponding \\param.
SmallVector<const ParmVarDecl *, 8> OrphanedParamDecls;
for (unsigned i = 0, e = ParamVarDocs.size(); i != e; ++i) {
if (!ParamVarDocs[i])
OrphanedParamDecls.push_back(ParamVars[i]);
}
// Second pass over unresolved \\param commands: do typo correction.
// Suggest corrections from a set of parameter declarations that have no
// corresponding \\param.
for (unsigned i = 0, e = UnresolvedParamCommands.size(); i != e; ++i) {
const ParamCommandComment *PCC = UnresolvedParamCommands[i];
SourceRange ArgRange = PCC->getParamNameRange();
StringRef ParamName = PCC->getParamNameAsWritten();
Diag(ArgRange.getBegin(), diag::warn_doc_param_not_found)
<< ParamName << ArgRange;
// All parameters documented -- can't suggest a correction.
if (OrphanedParamDecls.size() == 0)
continue;
unsigned CorrectedParamIndex = ParamCommandComment::InvalidParamIndex;
if (OrphanedParamDecls.size() == 1) {
// If one parameter is not documented then that parameter is the only
// possible suggestion.
CorrectedParamIndex = 0;
} else {
// Do typo correction.
CorrectedParamIndex = correctTypoInParmVarReference(ParamName,
OrphanedParamDecls);
}
if (CorrectedParamIndex != ParamCommandComment::InvalidParamIndex) {
const ParmVarDecl *CorrectedPVD = OrphanedParamDecls[CorrectedParamIndex];
if (const IdentifierInfo *CorrectedII = CorrectedPVD->getIdentifier())
Diag(ArgRange.getBegin(), diag::note_doc_param_name_suggestion)
<< CorrectedII->getName()
<< FixItHint::CreateReplacement(ArgRange, CorrectedII->getName());
}
}
}
bool Sema::isFunctionDecl() {
if (!ThisDeclInfo)
return false;
if (!ThisDeclInfo->IsFilled)
inspectThisDecl();
return ThisDeclInfo->getKind() == DeclInfo::FunctionKind;
}
bool Sema::isAnyFunctionDecl() {
return isFunctionDecl() && ThisDeclInfo->CurrentDecl &&
isa<FunctionDecl>(ThisDeclInfo->CurrentDecl);
}
bool Sema::isFunctionOrMethodVariadic() {
if (!isAnyFunctionDecl() && !isObjCMethodDecl() && !isFunctionTemplateDecl())
return false;
if (const FunctionDecl *FD =
dyn_cast<FunctionDecl>(ThisDeclInfo->CurrentDecl))
return FD->isVariadic();
if (const FunctionTemplateDecl *FTD =
dyn_cast<FunctionTemplateDecl>(ThisDeclInfo->CurrentDecl))
return FTD->getTemplatedDecl()->isVariadic();
if (const ObjCMethodDecl *MD =
dyn_cast<ObjCMethodDecl>(ThisDeclInfo->CurrentDecl))
return MD->isVariadic();
return false;
}
bool Sema::isObjCMethodDecl() {
return isFunctionDecl() && ThisDeclInfo->CurrentDecl &&
isa<ObjCMethodDecl>(ThisDeclInfo->CurrentDecl);
}
bool Sema::isFunctionPointerVarDecl() {
if (!ThisDeclInfo)
return false;
if (!ThisDeclInfo->IsFilled)
inspectThisDecl();
if (ThisDeclInfo->getKind() == DeclInfo::VariableKind) {
if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(ThisDeclInfo->CurrentDecl)) {
QualType QT = VD->getType();
return QT->isFunctionPointerType();
}
}
return false;
}
bool Sema::isObjCPropertyDecl() {
if (!ThisDeclInfo)
return false;
if (!ThisDeclInfo->IsFilled)
inspectThisDecl();
return ThisDeclInfo->CurrentDecl &&
       ThisDeclInfo->CurrentDecl->getKind() == Decl::ObjCProperty;
}
bool Sema::isTemplateOrSpecialization() {
if (!ThisDeclInfo)
return false;
if (!ThisDeclInfo->IsFilled)
inspectThisDecl();
return ThisDeclInfo->getTemplateKind() != DeclInfo::NotTemplate;
}
bool Sema::isRecordLikeDecl() {
if (!ThisDeclInfo)
return false;
if (!ThisDeclInfo->IsFilled)
inspectThisDecl();
return isUnionDecl() || isClassOrStructDecl() || isObjCInterfaceDecl() ||
isObjCProtocolDecl();
}
bool Sema::isUnionDecl() {
if (!ThisDeclInfo)
return false;
if (!ThisDeclInfo->IsFilled)
inspectThisDecl();
if (const RecordDecl *RD =
dyn_cast_or_null<RecordDecl>(ThisDeclInfo->CurrentDecl))
return RD->isUnion();
return false;
}
bool Sema::isClassOrStructDecl() {
if (!ThisDeclInfo)
return false;
if (!ThisDeclInfo->IsFilled)
inspectThisDecl();
return ThisDeclInfo->CurrentDecl &&
isa<RecordDecl>(ThisDeclInfo->CurrentDecl) &&
!isUnionDecl();
}
bool Sema::isClassTemplateDecl() {
if (!ThisDeclInfo)
return false;
if (!ThisDeclInfo->IsFilled)
inspectThisDecl();
return ThisDeclInfo->CurrentDecl &&
(isa<ClassTemplateDecl>(ThisDeclInfo->CurrentDecl));
}
bool Sema::isFunctionTemplateDecl() {
if (!ThisDeclInfo)
return false;
if (!ThisDeclInfo->IsFilled)
inspectThisDecl();
return ThisDeclInfo->CurrentDecl &&
(isa<FunctionTemplateDecl>(ThisDeclInfo->CurrentDecl));
}
bool Sema::isObjCInterfaceDecl() {
if (!ThisDeclInfo)
return false;
if (!ThisDeclInfo->IsFilled)
inspectThisDecl();
return ThisDeclInfo->CurrentDecl &&
isa<ObjCInterfaceDecl>(ThisDeclInfo->CurrentDecl);
}
bool Sema::isObjCProtocolDecl() {
if (!ThisDeclInfo)
return false;
if (!ThisDeclInfo->IsFilled)
inspectThisDecl();
return ThisDeclInfo->CurrentDecl &&
isa<ObjCProtocolDecl>(ThisDeclInfo->CurrentDecl);
}
ArrayRef<const ParmVarDecl *> Sema::getParamVars() {
if (!ThisDeclInfo->IsFilled)
inspectThisDecl();
return ThisDeclInfo->ParamVars;
}
void Sema::inspectThisDecl() {
ThisDeclInfo->fill();
}
unsigned Sema::resolveParmVarReference(StringRef Name,
ArrayRef<const ParmVarDecl *> ParamVars) {
for (unsigned i = 0, e = ParamVars.size(); i != e; ++i) {
const IdentifierInfo *II = ParamVars[i]->getIdentifier();
if (II && II->getName() == Name)
return i;
}
if (Name == "..." && isFunctionOrMethodVariadic())
return ParamCommandComment::VarArgParamIndex;
return ParamCommandComment::InvalidParamIndex;
}
namespace {
class SimpleTypoCorrector {
StringRef Typo;
const unsigned MaxEditDistance;
const NamedDecl *BestDecl;
unsigned BestEditDistance;
unsigned BestIndex;
unsigned NextIndex;
public:
SimpleTypoCorrector(StringRef Typo) :
Typo(Typo), MaxEditDistance((Typo.size() + 2) / 3),
BestDecl(nullptr), BestEditDistance(MaxEditDistance + 1),
BestIndex(0), NextIndex(0)
{ }
void addDecl(const NamedDecl *ND);
const NamedDecl *getBestDecl() const {
if (BestEditDistance > MaxEditDistance)
return nullptr;
return BestDecl;
}
unsigned getBestDeclIndex() const {
assert(getBestDecl());
return BestIndex;
}
};
void SimpleTypoCorrector::addDecl(const NamedDecl *ND) {
unsigned CurrIndex = NextIndex++;
const IdentifierInfo *II = ND->getIdentifier();
if (!II)
return;
StringRef Name = II->getName();
unsigned MinPossibleEditDistance = abs((int)Name.size() - (int)Typo.size());
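// The length difference is a lower bound on the edit distance; if it alone
// already exceeds roughly a third of the typo's length, this candidate can
// never beat MaxEditDistance, so skip the costly computation.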
if (MinPossibleEditDistance > 0 &&
Typo.size() / MinPossibleEditDistance < 3)
return;
unsigned EditDistance = Typo.edit_distance(Name, true, MaxEditDistance);
if (EditDistance < BestEditDistance) {
BestEditDistance = EditDistance;
BestDecl = ND;
BestIndex = CurrIndex;
}
}
} // unnamed namespace
unsigned Sema::correctTypoInParmVarReference(
StringRef Typo,
ArrayRef<const ParmVarDecl *> ParamVars) {
SimpleTypoCorrector Corrector(Typo);
for (unsigned i = 0, e = ParamVars.size(); i != e; ++i)
Corrector.addDecl(ParamVars[i]);
if (Corrector.getBestDecl())
return Corrector.getBestDeclIndex();
else
return ParamCommandComment::InvalidParamIndex;
}
namespace {
bool ResolveTParamReferenceHelper(
StringRef Name,
const TemplateParameterList *TemplateParameters,
SmallVectorImpl<unsigned> *Position) {
for (unsigned i = 0, e = TemplateParameters->size(); i != e; ++i) {
const NamedDecl *Param = TemplateParameters->getParam(i);
const IdentifierInfo *II = Param->getIdentifier();
if (II && II->getName() == Name) {
Position->push_back(i);
return true;
}
if (const TemplateTemplateParmDecl *TTP =
dyn_cast<TemplateTemplateParmDecl>(Param)) {
Position->push_back(i);
if (ResolveTParamReferenceHelper(Name, TTP->getTemplateParameters(),
Position))
return true;
Position->pop_back();
}
}
return false;
}
} // unnamed namespace
bool Sema::resolveTParamReference(
StringRef Name,
const TemplateParameterList *TemplateParameters,
SmallVectorImpl<unsigned> *Position) {
Position->clear();
if (!TemplateParameters)
return false;
return ResolveTParamReferenceHelper(Name, TemplateParameters, Position);
}
namespace {
void CorrectTypoInTParamReferenceHelper(
const TemplateParameterList *TemplateParameters,
SimpleTypoCorrector &Corrector) {
for (unsigned i = 0, e = TemplateParameters->size(); i != e; ++i) {
const NamedDecl *Param = TemplateParameters->getParam(i);
Corrector.addDecl(Param);
if (const TemplateTemplateParmDecl *TTP =
dyn_cast<TemplateTemplateParmDecl>(Param))
CorrectTypoInTParamReferenceHelper(TTP->getTemplateParameters(),
Corrector);
}
}
} // unnamed namespace
StringRef Sema::correctTypoInTParamReference(
StringRef Typo,
const TemplateParameterList *TemplateParameters) {
SimpleTypoCorrector Corrector(Typo);
CorrectTypoInTParamReferenceHelper(TemplateParameters, Corrector);
if (const NamedDecl *ND = Corrector.getBestDecl()) {
const IdentifierInfo *II = ND->getIdentifier();
assert(II && "SimpleTypoCorrector should not return this decl");
return II->getName();
}
return StringRef();
}
InlineCommandComment::RenderKind
Sema::getInlineCommandRenderKind(StringRef Name) const {
assert(Traits.getCommandInfo(Name)->IsInlineCommand);
return llvm::StringSwitch<InlineCommandComment::RenderKind>(Name)
.Case("b", InlineCommandComment::RenderBold)
.Cases("c", "p", InlineCommandComment::RenderMonospaced)
.Cases("a", "e", "em", InlineCommandComment::RenderEmphasized)
.Default(InlineCommandComment::RenderNormal);
}
} // end namespace comments
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/ASTTypeTraits.cpp | //===--- ASTTypeTraits.cpp --------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Provides a dynamic type identifier and a dynamically typed node container
// that can be used to store an AST base node at runtime in the same storage in
// a type safe way.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
namespace clang {
namespace ast_type_traits {
const ASTNodeKind::KindInfo ASTNodeKind::AllKindInfo[] = {
{ NKI_None, "<None>" },
{ NKI_None, "CXXCtorInitializer" },
{ NKI_None, "TemplateArgument" },
{ NKI_None, "NestedNameSpecifier" },
{ NKI_None, "NestedNameSpecifierLoc" },
{ NKI_None, "QualType" },
{ NKI_None, "TypeLoc" },
{ NKI_None, "Decl" },
#define DECL(DERIVED, BASE) { NKI_##BASE, #DERIVED "Decl" },
#include "clang/AST/DeclNodes.inc"
{ NKI_None, "Stmt" },
#define STMT(DERIVED, BASE) { NKI_##BASE, #DERIVED },
#include "clang/AST/StmtNodes.inc"
{ NKI_None, "Type" },
#define TYPE(DERIVED, BASE) { NKI_##BASE, #DERIVED "Type" },
#include "clang/AST/TypeNodes.def"
};
bool ASTNodeKind::isBaseOf(ASTNodeKind Other, unsigned *Distance) const {
return isBaseOf(KindId, Other.KindId, Distance);
}
bool ASTNodeKind::isSame(ASTNodeKind Other) const {
return KindId != NKI_None && KindId == Other.KindId;
}
bool ASTNodeKind::isBaseOf(NodeKindId Base, NodeKindId Derived,
unsigned *Distance) {
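// Walk Derived's parent chain toward the root, counting hops; the chain
// terminates at NKI_None, and Distance is reported even when Base is never
// found.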
if (Base == NKI_None || Derived == NKI_None) return false;
unsigned Dist = 0;
while (Derived != Base && Derived != NKI_None) {
Derived = AllKindInfo[Derived].ParentId;
++Dist;
}
if (Distance)
*Distance = Dist;
return Derived == Base;
}
StringRef ASTNodeKind::asStringRef() const { return AllKindInfo[KindId].Name; }
ASTNodeKind ASTNodeKind::getMostDerivedType(ASTNodeKind Kind1,
ASTNodeKind Kind2) {
if (Kind1.isBaseOf(Kind2)) return Kind2;
if (Kind2.isBaseOf(Kind1)) return Kind1;
return ASTNodeKind();
}
ASTNodeKind ASTNodeKind::getMostDerivedCommonAncestor(ASTNodeKind Kind1,
ASTNodeKind Kind2) {
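// Climb Kind1's ancestor chain until reaching a kind that is also a base of
// Kind2; NKI_None acts as the universal root and terminates the loop.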
NodeKindId Parent = Kind1.KindId;
while (!isBaseOf(Parent, Kind2.KindId, nullptr) && Parent != NKI_None) {
Parent = AllKindInfo[Parent].ParentId;
}
return ASTNodeKind(Parent);
}
ASTNodeKind ASTNodeKind::getFromNode(const Decl &D) {
switch (D.getKind()) {
#define DECL(DERIVED, BASE) \
case Decl::DERIVED: return ASTNodeKind(NKI_##DERIVED##Decl);
#define ABSTRACT_DECL(D)
#include "clang/AST/DeclNodes.inc"
};
llvm_unreachable("invalid decl kind");
}
ASTNodeKind ASTNodeKind::getFromNode(const Stmt &S) {
switch (S.getStmtClass()) {
case Stmt::NoStmtClass: return NKI_None;
#define STMT(CLASS, PARENT) \
case Stmt::CLASS##Class: return ASTNodeKind(NKI_##CLASS);
#define ABSTRACT_STMT(S)
#include "clang/AST/StmtNodes.inc"
}
llvm_unreachable("invalid stmt kind");
}
ASTNodeKind ASTNodeKind::getFromNode(const Type &T) {
switch (T.getTypeClass()) {
#define TYPE(Class, Base) \
case Type::Class: return ASTNodeKind(NKI_##Class##Type);
#define ABSTRACT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
}
llvm_unreachable("invalid type kind");
}
void DynTypedNode::print(llvm::raw_ostream &OS,
const PrintingPolicy &PP) const {
if (const TemplateArgument *TA = get<TemplateArgument>())
TA->print(PP, OS);
else if (const NestedNameSpecifier *NNS = get<NestedNameSpecifier>())
NNS->print(OS, PP);
else if (const NestedNameSpecifierLoc *NNSL = get<NestedNameSpecifierLoc>())
NNSL->getNestedNameSpecifier()->print(OS, PP);
else if (const QualType *QT = get<QualType>())
QT->print(OS, PP);
else if (const TypeLoc *TL = get<TypeLoc>())
TL->getType().print(OS, PP);
else if (const Decl *D = get<Decl>())
D->print(OS, PP);
else if (const Stmt *S = get<Stmt>())
S->printPretty(OS, nullptr, PP);
else if (const Type *T = get<Type>())
QualType(T, 0).print(OS, PP);
else
OS << "Unable to print values of type " << NodeKind.asStringRef() << "\n";
}
void DynTypedNode::dump(llvm::raw_ostream &OS, SourceManager &SM) const {
if (const Decl *D = get<Decl>())
D->dump(OS);
else if (const Stmt *S = get<Stmt>())
S->dump(OS, SM);
else
OS << "Unable to dump values of type " << NodeKind.asStringRef() << "\n";
}
SourceRange DynTypedNode::getSourceRange() const {
if (const CXXCtorInitializer *CCI = get<CXXCtorInitializer>())
return CCI->getSourceRange();
if (const NestedNameSpecifierLoc *NNSL = get<NestedNameSpecifierLoc>())
return NNSL->getSourceRange();
if (const TypeLoc *TL = get<TypeLoc>())
return TL->getSourceRange();
if (const Decl *D = get<Decl>())
return D->getSourceRange();
if (const Stmt *S = get<Stmt>())
return S->getSourceRange();
return SourceRange();
}
} // end namespace ast_type_traits
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/MicrosoftCXXABI.cpp | //===------- MicrosoftCXXABI.cpp - AST support for the Microsoft C++ ABI --===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides C++ AST support targeting the Microsoft Visual C++
// ABI.
//
//===----------------------------------------------------------------------===//
#include "CXXABI.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Type.h"
#include "clang/Basic/TargetInfo.h"
using namespace clang;
namespace {
/// \brief Numbers things which need to correspond across multiple TUs.
/// Typically these are things like static locals, lambdas, or blocks.
class MicrosoftNumberingContext : public MangleNumberingContext {
llvm::DenseMap<const Type *, unsigned> ManglingNumbers;
unsigned LambdaManglingNumber;
unsigned StaticLocalNumber;
unsigned StaticThreadlocalNumber;
public:
MicrosoftNumberingContext()
: MangleNumberingContext(), LambdaManglingNumber(0),
StaticLocalNumber(0), StaticThreadlocalNumber(0) {}
unsigned getManglingNumber(const CXXMethodDecl *CallOperator) override {
return ++LambdaManglingNumber;
}
unsigned getManglingNumber(const BlockDecl *BD) override {
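// All blocks share a single counter: keying the map on a null Type makes
// every BlockDecl increment the same entry.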
const Type *Ty = nullptr;
return ++ManglingNumbers[Ty];
}
unsigned getStaticLocalNumber(const VarDecl *VD) override {
if (VD->getTLSKind())
return ++StaticThreadlocalNumber;
return ++StaticLocalNumber;
}
unsigned getManglingNumber(const VarDecl *VD,
unsigned MSLocalManglingNumber) override {
return MSLocalManglingNumber;
}
unsigned getManglingNumber(const TagDecl *TD,
unsigned MSLocalManglingNumber) override {
return MSLocalManglingNumber;
}
};
class MicrosoftCXXABI : public CXXABI {
ASTContext &Context;
llvm::SmallDenseMap<CXXRecordDecl *, CXXConstructorDecl *> RecordToCopyCtor;
llvm::SmallDenseMap<std::pair<const CXXConstructorDecl *, unsigned>, Expr *>
CtorToDefaultArgExpr;
public:
MicrosoftCXXABI(ASTContext &Ctx) : Context(Ctx) { }
std::pair<uint64_t, unsigned>
getMemberPointerWidthAndAlign(const MemberPointerType *MPT) const override;
CallingConv getDefaultMethodCallConv(bool isVariadic) const override {
if (!isVariadic &&
Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86)
return CC_X86ThisCall;
return CC_C;
}
bool isNearlyEmpty(const CXXRecordDecl *RD) const override {
// FIXME: Audit the corners
if (!RD->isDynamicClass())
return false;
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
// In the Microsoft ABI, classes can have one or two vtable pointers.
CharUnits PointerSize =
Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
return Layout.getNonVirtualSize() == PointerSize ||
Layout.getNonVirtualSize() == PointerSize * 2;
}
void addDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
unsigned ParmIdx, Expr *DAE) override {
CtorToDefaultArgExpr[std::make_pair(CD, ParmIdx)] = DAE;
}
Expr *getDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
unsigned ParmIdx) override {
return CtorToDefaultArgExpr[std::make_pair(CD, ParmIdx)];
}
const CXXConstructorDecl *
getCopyConstructorForExceptionObject(CXXRecordDecl *RD) override {
return RecordToCopyCtor[RD];
}
void
addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
CXXConstructorDecl *CD) override {
assert(CD != nullptr);
assert(RecordToCopyCtor[RD] == nullptr || RecordToCopyCtor[RD] == CD);
RecordToCopyCtor[RD] = CD;
}
MangleNumberingContext *createMangleNumberingContext() const override {
return new MicrosoftNumberingContext();
}
};
}
// getNumBases() only gives us the number of direct bases, not the total.
// This function tells us whether we inherit from anyone that uses the
// multiple inheritance model, or whether we have a non-primary base class,
// which itself forces the multiple inheritance model.
static bool usesMultipleInheritanceModel(const CXXRecordDecl *RD) {
while (RD->getNumBases() > 0) {
if (RD->getNumBases() > 1)
return true;
assert(RD->getNumBases() == 1);
const CXXRecordDecl *Base =
RD->bases_begin()->getType()->getAsCXXRecordDecl();
if (RD->isPolymorphic() && !Base->isPolymorphic())
return true;
RD = Base;
}
return false;
}
MSInheritanceAttr::Spelling CXXRecordDecl::calculateInheritanceModel() const {
if (!hasDefinition() || isParsingBaseSpecifiers())
return MSInheritanceAttr::Keyword_unspecified_inheritance;
if (getNumVBases() > 0)
return MSInheritanceAttr::Keyword_virtual_inheritance;
if (usesMultipleInheritanceModel(this))
return MSInheritanceAttr::Keyword_multiple_inheritance;
return MSInheritanceAttr::Keyword_single_inheritance;
}
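// For orientation, a sketch of how typical classes map to models under the
// logic above (A and B are placeholder class names):
//   struct A { };              // single_inheritance
//   struct C : A, B { };       // multiple_inheritance (two direct bases)
//   struct D : virtual A { };  // virtual_inheritance (has a virtual base)
//   struct E;                  // unspecified_inheritance (no definition yet)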
MSInheritanceAttr::Spelling
CXXRecordDecl::getMSInheritanceModel() const {
MSInheritanceAttr *IA = getAttr<MSInheritanceAttr>();
assert(IA && "Expected MSInheritanceAttr on the CXXRecordDecl!");
return IA->getSemanticSpelling();
}
MSVtorDispAttr::Mode CXXRecordDecl::getMSVtorDispMode() const {
if (MSVtorDispAttr *VDA = getAttr<MSVtorDispAttr>())
return VDA->getVtorDispMode();
return MSVtorDispAttr::Mode(getASTContext().getLangOpts().VtorDispMode);
}
// Returns the number of pointer and integer slots used to represent a member
// pointer in the MS C++ ABI.
//
// Member function pointers have the following general form; however, fields
// are dropped as permitted (under the MSVC interpretation) by the inheritance
// model of the actual class.
//
// struct {
// // A pointer to the member function to call. If the member function is
// // virtual, this will be a thunk that forwards to the appropriate vftable
// // slot.
// void *FunctionPointerOrVirtualThunk;
//
// // An offset to add to the address of the vbtable pointer after
// // (possibly) selecting the virtual base but before resolving and calling
// // the function.
// // Only needed if the class has any virtual bases or bases at a non-zero
// // offset.
// int NonVirtualBaseAdjustment;
//
// // The offset of the vb-table pointer within the object. Only needed for
// // incomplete types.
// int VBPtrOffset;
//
// // An offset within the vb-table that selects the virtual base containing
// // the member. Loading from this offset produces a new offset that is
// // added to the address of the vb-table pointer to produce the base.
// int VirtualBaseAdjustmentOffset;
// };
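// As a rough illustration (assuming 4-byte ints and pointers, as on x86), a
// member *function* pointer then occupies:
//   single inheritance:      { Ptr }                      -> 4 bytes
//   multiple inheritance:    { Ptr, NVOff }               -> 8 bytes
//   virtual inheritance:     { Ptr, NVOff, VBOff }        -> 12 bytes
//   unspecified inheritance: { Ptr, NVOff, VBPtr, VBOff } -> 16 bytes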
static std::pair<unsigned, unsigned>
getMSMemberPointerSlots(const MemberPointerType *MPT) {
const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel();
unsigned Ptrs = 0;
unsigned Ints = 0;
if (MPT->isMemberFunctionPointer())
Ptrs = 1;
else
Ints = 1;
if (MSInheritanceAttr::hasNVOffsetField(MPT->isMemberFunctionPointer(),
Inheritance))
Ints++;
if (MSInheritanceAttr::hasVBPtrOffsetField(Inheritance))
Ints++;
if (MSInheritanceAttr::hasVBTableOffsetField(Inheritance))
Ints++;
return std::make_pair(Ptrs, Ints);
}
std::pair<uint64_t, unsigned> MicrosoftCXXABI::getMemberPointerWidthAndAlign(
const MemberPointerType *MPT) const {
// The nominal struct is laid out with pointers followed by ints and aligned
// to a pointer width if any are present and an int width otherwise.
const TargetInfo &Target = Context.getTargetInfo();
unsigned PtrSize = Target.getPointerWidth(0);
unsigned IntSize = Target.getIntWidth();
unsigned Ptrs, Ints;
std::tie(Ptrs, Ints) = getMSMemberPointerSlots(MPT);
uint64_t Width = Ptrs * PtrSize + Ints * IntSize;
unsigned Align;
// When MSVC does x86_32 record layout, it aligns aggregate member pointers to
// 8 bytes. However, __alignof usually returns 4 for data memptrs and 8 for
// function memptrs.
if (Ptrs + Ints > 1 && Target.getTriple().isArch32Bit())
Align = 64;
else if (Ptrs)
Align = Target.getPointerAlign(0);
else
Align = Target.getIntAlign();
if (Target.getTriple().isArch64Bit())
Width = llvm::RoundUpToAlignment(Width, Align);
return std::make_pair(Width, Align);
}
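// Worked example (illustrative) of the computation above: on x86-64, an
// unspecified-inheritance member function pointer is 1 pointer + 3 ints =
// 160 bits; it is aligned to 64 and, on a 64-bit target, rounded up to
// 192 bits -- the 24-byte size commonly observed from MSVC.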
CXXABI *clang::CreateMicrosoftCXXABI(ASTContext &Ctx) {
return new MicrosoftCXXABI(Ctx);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/RawCommentList.cpp | //===--- RawCommentList.cpp - Processing raw comments -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/RawCommentList.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Comment.h"
#include "clang/AST/CommentBriefParser.h"
#include "clang/AST/CommentCommandTraits.h"
#include "clang/AST/CommentLexer.h"
#include "clang/AST/CommentParser.h"
#include "clang/AST/CommentSema.h"
#include "llvm/ADT/STLExtras.h"
using namespace clang;
namespace {
/// Get the comment kind, and whether the comment is a trailing comment.
std::pair<RawComment::CommentKind, bool> getCommentKind(StringRef Comment,
bool ParseAllComments) {
const size_t MinCommentLength = ParseAllComments ? 2 : 3;
if ((Comment.size() < MinCommentLength) || Comment[0] != '/')
return std::make_pair(RawComment::RCK_Invalid, false);
RawComment::CommentKind K;
if (Comment[1] == '/') {
if (Comment.size() < 3)
return std::make_pair(RawComment::RCK_OrdinaryBCPL, false);
if (Comment[2] == '/')
K = RawComment::RCK_BCPLSlash;
else if (Comment[2] == '!')
K = RawComment::RCK_BCPLExcl;
else
return std::make_pair(RawComment::RCK_OrdinaryBCPL, false);
} else {
assert(Comment.size() >= 4);
// Comment lexer does not understand escapes in comment markers, so pretend
// that this is not a comment.
if (Comment[1] != '*' ||
Comment[Comment.size() - 2] != '*' ||
Comment[Comment.size() - 1] != '/')
return std::make_pair(RawComment::RCK_Invalid, false);
if (Comment[2] == '*')
K = RawComment::RCK_JavaDoc;
else if (Comment[2] == '!')
K = RawComment::RCK_Qt;
else
return std::make_pair(RawComment::RCK_OrdinaryC, false);
}
const bool TrailingComment = (Comment.size() > 3) && (Comment[3] == '<');
return std::make_pair(K, TrailingComment);
}
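// Illustrative classification (with ParseAllComments false):
//   "// x"   -> RCK_OrdinaryBCPL      "/* x */"  -> RCK_OrdinaryC
//   "/// x"  -> RCK_BCPLSlash         "/** x */" -> RCK_JavaDoc
//   "//! x"  -> RCK_BCPLExcl          "/*! x */" -> RCK_Qt
//   "///< x" -> RCK_BCPLSlash, with the trailing-comment flag set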
bool mergedCommentIsTrailingComment(StringRef Comment) {
return (Comment.size() > 3) && (Comment[3] == '<');
}
} // unnamed namespace
RawComment::RawComment(const SourceManager &SourceMgr, SourceRange SR,
bool Merged, bool ParseAllComments) :
Range(SR), RawTextValid(false), BriefTextValid(false),
IsAttached(false), IsAlmostTrailingComment(false),
ParseAllComments(ParseAllComments) {
// Extract raw comment text, if possible.
if (SR.getBegin() == SR.getEnd() || getRawText(SourceMgr).empty()) {
Kind = RCK_Invalid;
return;
}
if (!Merged) {
// Guess comment kind.
std::pair<CommentKind, bool> K = getCommentKind(RawText, ParseAllComments);
Kind = K.first;
IsTrailingComment = K.second;
IsAlmostTrailingComment = RawText.startswith("//<") ||
RawText.startswith("/*<");
} else {
Kind = RCK_Merged;
IsTrailingComment = mergedCommentIsTrailingComment(RawText);
}
}
StringRef RawComment::getRawTextSlow(const SourceManager &SourceMgr) const {
FileID BeginFileID;
FileID EndFileID;
unsigned BeginOffset;
unsigned EndOffset;
std::tie(BeginFileID, BeginOffset) =
SourceMgr.getDecomposedLoc(Range.getBegin());
std::tie(EndFileID, EndOffset) = SourceMgr.getDecomposedLoc(Range.getEnd());
const unsigned Length = EndOffset - BeginOffset;
if (Length < 2)
return StringRef();
// The comment can't begin in one file and end in another.
assert(BeginFileID == EndFileID);
bool Invalid = false;
const char *BufferStart = SourceMgr.getBufferData(BeginFileID,
&Invalid).data();
if (Invalid)
return StringRef();
return StringRef(BufferStart + BeginOffset, Length);
}
const char *RawComment::extractBriefText(const ASTContext &Context) const {
// Make sure that RawText is valid.
getRawText(Context.getSourceManager());
// Since we will be copying the resulting text, all allocations made during
// parsing are garbage after resulting string is formed. Thus we can use
// a separate allocator for all temporary stuff.
llvm::BumpPtrAllocator Allocator;
comments::Lexer L(Allocator, Context.getDiagnostics(),
Context.getCommentCommandTraits(),
Range.getBegin(),
RawText.begin(), RawText.end());
comments::BriefParser P(L, Context.getCommentCommandTraits());
const std::string Result = P.Parse();
const unsigned BriefTextLength = Result.size();
char *BriefTextPtr = new (Context) char[BriefTextLength + 1];
memcpy(BriefTextPtr, Result.c_str(), BriefTextLength + 1);
BriefText = BriefTextPtr;
BriefTextValid = true;
return BriefTextPtr;
}
comments::FullComment *RawComment::parse(const ASTContext &Context,
const Preprocessor *PP,
const Decl *D) const {
// Make sure that RawText is valid.
getRawText(Context.getSourceManager());
comments::Lexer L(Context.getAllocator(), Context.getDiagnostics(),
Context.getCommentCommandTraits(),
getSourceRange().getBegin(),
RawText.begin(), RawText.end());
comments::Sema S(Context.getAllocator(), Context.getSourceManager(),
Context.getDiagnostics(),
Context.getCommentCommandTraits(),
PP);
S.setDecl(D);
comments::Parser P(L, S, Context.getAllocator(), Context.getSourceManager(),
Context.getDiagnostics(),
Context.getCommentCommandTraits());
return P.parseFullComment();
}
static bool onlyWhitespaceBetween(SourceManager &SM,
SourceLocation Loc1, SourceLocation Loc2,
unsigned MaxNewlinesAllowed) {
std::pair<FileID, unsigned> Loc1Info = SM.getDecomposedLoc(Loc1);
std::pair<FileID, unsigned> Loc2Info = SM.getDecomposedLoc(Loc2);
// Question does not make sense if locations are in different files.
if (Loc1Info.first != Loc2Info.first)
return false;
bool Invalid = false;
const char *Buffer = SM.getBufferData(Loc1Info.first, &Invalid).data();
if (Invalid)
return false;
unsigned NumNewlines = 0;
assert(Loc1Info.second <= Loc2Info.second && "Loc1 after Loc2!");
// Look for non-whitespace characters and remember any newlines seen.
for (unsigned I = Loc1Info.second; I != Loc2Info.second; ++I) {
switch (Buffer[I]) {
default:
return false;
case ' ':
case '\t':
case '\f':
case '\v':
break;
case '\r':
case '\n':
++NumNewlines;
// Check if we have found more than the maximum allowed number of
// newlines.
if (NumNewlines > MaxNewlinesAllowed)
return false;
// Collapse \r\n and \n\r into a single newline.
if (I + 1 != Loc2Info.second &&
(Buffer[I + 1] == '\n' || Buffer[I + 1] == '\r') &&
Buffer[I] != Buffer[I + 1])
++I;
break;
}
}
return true;
}
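// For example (a sketch): two adjacent doxygen lines
//   /// first line
//   /// second line
// have a single newline of pure whitespace between them and agree on
// trailing-ness, so addComment (below) replaces the stored first comment
// with one RCK_Merged comment spanning both lines.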
void RawCommentList::addComment(const RawComment &RC,
llvm::BumpPtrAllocator &Allocator) {
if (RC.isInvalid())
return;
  // Check if the comments are out of source order.
while (!Comments.empty() &&
!SourceMgr.isBeforeInTranslationUnit(Comments.back()->getLocStart(),
RC.getLocStart())) {
    // If so, just pop the last few comments that don't fit.
    // This happens if an \#include directive contains comments.
Comments.pop_back();
}
// Ordinary comments are not interesting for us.
if (RC.isOrdinary())
return;
// If this is the first Doxygen comment, save it (because there isn't
// anything to merge it with).
if (Comments.empty()) {
Comments.push_back(new (Allocator) RawComment(RC));
return;
}
const RawComment &C1 = *Comments.back();
const RawComment &C2 = RC;
// Merge comments only if there is only whitespace between them.
// Can't merge trailing and non-trailing comments.
// Merge comments if they are on same or consecutive lines.
if (C1.isTrailingComment() == C2.isTrailingComment() &&
onlyWhitespaceBetween(SourceMgr, C1.getLocEnd(), C2.getLocStart(),
/*MaxNewlinesAllowed=*/1)) {
SourceRange MergedRange(C1.getLocStart(), C2.getLocEnd());
*Comments.back() = RawComment(SourceMgr, MergedRange, true,
RC.isParseAllComments());
} else {
Comments.push_back(new (Allocator) RawComment(RC));
}
}
void RawCommentList::addDeserializedComments(ArrayRef<RawComment *> DeserializedComments) {
std::vector<RawComment *> MergedComments;
MergedComments.reserve(Comments.size() + DeserializedComments.size());
std::merge(Comments.begin(), Comments.end(),
DeserializedComments.begin(), DeserializedComments.end(),
std::back_inserter(MergedComments),
BeforeThanCompare<RawComment>(SourceMgr));
std::swap(Comments, MergedComments);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/NestedNameSpecifier.cpp | //===--- NestedNameSpecifier.cpp - C++ nested name specifiers -----*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the NestedNameSpecifier class, which represents
// a C++ nested-name-specifier.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
using namespace clang;
NestedNameSpecifier *
NestedNameSpecifier::FindOrInsert(const ASTContext &Context,
const NestedNameSpecifier &Mockup) {
llvm::FoldingSetNodeID ID;
Mockup.Profile(ID);
void *InsertPos = nullptr;
NestedNameSpecifier *NNS
= Context.NestedNameSpecifiers.FindNodeOrInsertPos(ID, InsertPos);
if (!NNS) {
NNS = new (Context, llvm::alignOf<NestedNameSpecifier>())
NestedNameSpecifier(Mockup);
Context.NestedNameSpecifiers.InsertNode(NNS, InsertPos);
}
return NNS;
}
NestedNameSpecifier *
NestedNameSpecifier::Create(const ASTContext &Context,
NestedNameSpecifier *Prefix, IdentifierInfo *II) {
assert(II && "Identifier cannot be NULL");
assert((!Prefix || Prefix->isDependent()) && "Prefix must be dependent");
NestedNameSpecifier Mockup;
Mockup.Prefix.setPointer(Prefix);
Mockup.Prefix.setInt(StoredIdentifier);
Mockup.Specifier = II;
return FindOrInsert(Context, Mockup);
}
NestedNameSpecifier *
NestedNameSpecifier::Create(const ASTContext &Context,
NestedNameSpecifier *Prefix,
const NamespaceDecl *NS) {
assert(NS && "Namespace cannot be NULL");
assert((!Prefix ||
(Prefix->getAsType() == nullptr &&
Prefix->getAsIdentifier() == nullptr)) &&
"Broken nested name specifier");
NestedNameSpecifier Mockup;
Mockup.Prefix.setPointer(Prefix);
Mockup.Prefix.setInt(StoredDecl);
Mockup.Specifier = const_cast<NamespaceDecl *>(NS);
return FindOrInsert(Context, Mockup);
}
NestedNameSpecifier *
NestedNameSpecifier::Create(const ASTContext &Context,
NestedNameSpecifier *Prefix,
NamespaceAliasDecl *Alias) {
assert(Alias && "Namespace alias cannot be NULL");
assert((!Prefix ||
(Prefix->getAsType() == nullptr &&
Prefix->getAsIdentifier() == nullptr)) &&
"Broken nested name specifier");
NestedNameSpecifier Mockup;
Mockup.Prefix.setPointer(Prefix);
Mockup.Prefix.setInt(StoredDecl);
Mockup.Specifier = Alias;
return FindOrInsert(Context, Mockup);
}
NestedNameSpecifier *
NestedNameSpecifier::Create(const ASTContext &Context,
NestedNameSpecifier *Prefix,
bool Template, const Type *T) {
assert(T && "Type cannot be NULL");
NestedNameSpecifier Mockup;
Mockup.Prefix.setPointer(Prefix);
Mockup.Prefix.setInt(Template? StoredTypeSpecWithTemplate : StoredTypeSpec);
Mockup.Specifier = const_cast<Type*>(T);
return FindOrInsert(Context, Mockup);
}
NestedNameSpecifier *
NestedNameSpecifier::Create(const ASTContext &Context, IdentifierInfo *II) {
assert(II && "Identifier cannot be NULL");
NestedNameSpecifier Mockup;
Mockup.Prefix.setPointer(nullptr);
Mockup.Prefix.setInt(StoredIdentifier);
Mockup.Specifier = II;
return FindOrInsert(Context, Mockup);
}
NestedNameSpecifier *
NestedNameSpecifier::GlobalSpecifier(const ASTContext &Context) {
if (!Context.GlobalNestedNameSpecifier)
Context.GlobalNestedNameSpecifier =
new (Context, llvm::alignOf<NestedNameSpecifier>())
NestedNameSpecifier();
return Context.GlobalNestedNameSpecifier;
}
NestedNameSpecifier *
NestedNameSpecifier::SuperSpecifier(const ASTContext &Context,
CXXRecordDecl *RD) {
NestedNameSpecifier Mockup;
Mockup.Prefix.setPointer(nullptr);
Mockup.Prefix.setInt(StoredDecl);
Mockup.Specifier = RD;
return FindOrInsert(Context, Mockup);
}
NestedNameSpecifier::SpecifierKind NestedNameSpecifier::getKind() const {
if (!Specifier)
return Global;
switch (Prefix.getInt()) {
case StoredIdentifier:
return Identifier;
case StoredDecl: {
NamedDecl *ND = static_cast<NamedDecl *>(Specifier);
if (isa<CXXRecordDecl>(ND))
return Super;
return isa<NamespaceDecl>(ND) ? Namespace : NamespaceAlias;
}
case StoredTypeSpec:
return TypeSpec;
case StoredTypeSpecWithTemplate:
return TypeSpecWithTemplate;
}
llvm_unreachable("Invalid NNS Kind!");
}
/// \brief Retrieve the namespace stored in this nested name specifier.
NamespaceDecl *NestedNameSpecifier::getAsNamespace() const {
if (Prefix.getInt() == StoredDecl)
return dyn_cast<NamespaceDecl>(static_cast<NamedDecl *>(Specifier));
return nullptr;
}
/// \brief Retrieve the namespace alias stored in this nested name specifier.
NamespaceAliasDecl *NestedNameSpecifier::getAsNamespaceAlias() const {
if (Prefix.getInt() == StoredDecl)
return dyn_cast<NamespaceAliasDecl>(static_cast<NamedDecl *>(Specifier));
return nullptr;
}
/// \brief Retrieve the record declaration stored in this nested name specifier.
CXXRecordDecl *NestedNameSpecifier::getAsRecordDecl() const {
if (Prefix.getInt() == StoredDecl)
return dyn_cast<CXXRecordDecl>(static_cast<NamedDecl *>(Specifier));
return nullptr;
}
/// \brief Whether this nested name specifier refers to a dependent
/// type or not.
bool NestedNameSpecifier::isDependent() const {
switch (getKind()) {
case Identifier:
// Identifier specifiers always represent dependent types
return true;
case Namespace:
case NamespaceAlias:
case Global:
return false;
case Super: {
CXXRecordDecl *RD = static_cast<CXXRecordDecl *>(Specifier);
for (const auto &Base : RD->bases())
if (Base.getType()->isDependentType())
return true;
return false;
}
case TypeSpec:
case TypeSpecWithTemplate:
return getAsType()->isDependentType();
}
llvm_unreachable("Invalid NNS Kind!");
}
/// \brief Whether this nested name specifier is instantiation-dependent,
/// i.e. whether it depends in some way on a template parameter, even if
/// it does not name a dependent type.
bool NestedNameSpecifier::isInstantiationDependent() const {
switch (getKind()) {
case Identifier:
// Identifier specifiers always represent dependent types
return true;
case Namespace:
case NamespaceAlias:
case Global:
case Super:
return false;
case TypeSpec:
case TypeSpecWithTemplate:
return getAsType()->isInstantiationDependentType();
}
llvm_unreachable("Invalid NNS Kind!");
}
bool NestedNameSpecifier::containsUnexpandedParameterPack() const {
switch (getKind()) {
case Identifier:
return getPrefix() && getPrefix()->containsUnexpandedParameterPack();
case Namespace:
case NamespaceAlias:
case Global:
case Super:
return false;
case TypeSpec:
case TypeSpecWithTemplate:
return getAsType()->containsUnexpandedParameterPack();
}
llvm_unreachable("Invalid NNS Kind!");
}
/// \brief Print this nested name specifier to the given output
/// stream.
void
NestedNameSpecifier::print(raw_ostream &OS,
const PrintingPolicy &Policy) const {
if (getPrefix())
getPrefix()->print(OS, Policy);
switch (getKind()) {
case Identifier:
OS << getAsIdentifier()->getName();
break;
case Namespace:
if (getAsNamespace()->isAnonymousNamespace())
return;
OS << getAsNamespace()->getName();
break;
case NamespaceAlias:
OS << getAsNamespaceAlias()->getName();
break;
case Global:
break;
case Super:
OS << "__super";
break;
case TypeSpecWithTemplate:
OS << "template ";
// Fall through to print the type.
LLVM_FALLTHROUGH; // HLSL Change
case TypeSpec: {
const Type *T = getAsType();
PrintingPolicy InnerPolicy(Policy);
InnerPolicy.SuppressScope = true;
// Nested-name-specifiers are intended to contain minimally-qualified
// types. An actual ElaboratedType will not occur, since we'll store
// just the type that is referred to in the nested-name-specifier (e.g.,
// a TypedefType, TagType, etc.). However, when we are dealing with
// dependent template-id types (e.g., Outer<T>::template Inner<U>),
// the type requires its own nested-name-specifier for uniqueness, so we
// suppress that nested-name-specifier during printing.
assert(!isa<ElaboratedType>(T) &&
"Elaborated type in nested-name-specifier");
if (const TemplateSpecializationType *SpecType
= dyn_cast<TemplateSpecializationType>(T)) {
// Print the template name without its corresponding
// nested-name-specifier.
SpecType->getTemplateName().print(OS, InnerPolicy, true);
// Print the template argument list.
TemplateSpecializationType::PrintTemplateArgumentList(
OS, SpecType->getArgs(), SpecType->getNumArgs(), InnerPolicy);
} else {
// Print the type normally
QualType(T, 0).print(OS, InnerPolicy);
}
break;
}
}
OS << "::";
}
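// Sample renderings from print() above (illustrative):
//   Global specifier                       -> "::"
//   Namespace 'std'                        -> "std::"
//   TypeSpec 'vector<int>' with prefix 'std' -> "std::vector<int>::"
//   Super                                  -> "__super::"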
void NestedNameSpecifier::dump(const LangOptions &LO) {
print(llvm::errs(), PrintingPolicy(LO));
}
unsigned
NestedNameSpecifierLoc::getLocalDataLength(NestedNameSpecifier *Qualifier) {
assert(Qualifier && "Expected a non-NULL qualifier");
// Location of the trailing '::'.
unsigned Length = sizeof(unsigned);
switch (Qualifier->getKind()) {
case NestedNameSpecifier::Global:
// Nothing more to add.
break;
case NestedNameSpecifier::Identifier:
case NestedNameSpecifier::Namespace:
case NestedNameSpecifier::NamespaceAlias:
case NestedNameSpecifier::Super:
// The location of the identifier or namespace name.
Length += sizeof(unsigned);
break;
case NestedNameSpecifier::TypeSpecWithTemplate:
case NestedNameSpecifier::TypeSpec:
// The "void*" that points at the TypeLoc data.
// Note: the 'template' keyword is part of the TypeLoc.
Length += sizeof(void *);
break;
}
return Length;
}
unsigned
NestedNameSpecifierLoc::getDataLength(NestedNameSpecifier *Qualifier) {
unsigned Length = 0;
for (; Qualifier; Qualifier = Qualifier->getPrefix())
Length += getLocalDataLength(Qualifier);
return Length;
}
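// Worked example for getDataLength (assuming 4-byte location words and
// 8-byte pointers): for the qualifier 'std::vector<int>::', the namespace
// piece stores two source locations (8 bytes) and the type piece stores one
// TypeLoc pointer plus one source location (12 bytes), so the total is 20.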
namespace {
/// \brief Load a (possibly unaligned) source location from a given address
/// and offset.
SourceLocation LoadSourceLocation(void *Data, unsigned Offset) {
unsigned Raw;
memcpy(&Raw, static_cast<char *>(Data) + Offset, sizeof(unsigned));
return SourceLocation::getFromRawEncoding(Raw);
}
/// \brief Load a (possibly unaligned) pointer from a given address and
/// offset.
void *LoadPointer(void *Data, unsigned Offset) {
void *Result;
memcpy(&Result, static_cast<char *>(Data) + Offset, sizeof(void*));
return Result;
}
}
SourceRange NestedNameSpecifierLoc::getSourceRange() const {
if (!Qualifier)
return SourceRange();
NestedNameSpecifierLoc First = *this;
while (NestedNameSpecifierLoc Prefix = First.getPrefix())
First = Prefix;
return SourceRange(First.getLocalSourceRange().getBegin(),
getLocalSourceRange().getEnd());
}
SourceRange NestedNameSpecifierLoc::getLocalSourceRange() const {
if (!Qualifier)
return SourceRange();
unsigned Offset = getDataLength(Qualifier->getPrefix());
switch (Qualifier->getKind()) {
case NestedNameSpecifier::Global:
return LoadSourceLocation(Data, Offset);
case NestedNameSpecifier::Identifier:
case NestedNameSpecifier::Namespace:
case NestedNameSpecifier::NamespaceAlias:
case NestedNameSpecifier::Super:
return SourceRange(LoadSourceLocation(Data, Offset),
LoadSourceLocation(Data, Offset + sizeof(unsigned)));
case NestedNameSpecifier::TypeSpecWithTemplate:
case NestedNameSpecifier::TypeSpec: {
// The "void*" that points at the TypeLoc data.
// Note: the 'template' keyword is part of the TypeLoc.
void *TypeData = LoadPointer(Data, Offset);
TypeLoc TL(Qualifier->getAsType(), TypeData);
return SourceRange(TL.getBeginLoc(),
LoadSourceLocation(Data, Offset + sizeof(void*)));
}
}
llvm_unreachable("Invalid NNS Kind!");
}
TypeLoc NestedNameSpecifierLoc::getTypeLoc() const {
assert((Qualifier->getKind() == NestedNameSpecifier::TypeSpec ||
Qualifier->getKind() == NestedNameSpecifier::TypeSpecWithTemplate) &&
"Nested-name-specifier location is not a type");
// The "void*" that points at the TypeLoc data.
unsigned Offset = getDataLength(Qualifier->getPrefix());
void *TypeData = LoadPointer(Data, Offset);
return TypeLoc(Qualifier->getAsType(), TypeData);
}
namespace {
void Append(char *Start, char *End, char *&Buffer, unsigned &BufferSize,
unsigned &BufferCapacity) {
if (Start == End)
return;
if (BufferSize + (End - Start) > BufferCapacity) {
// Reallocate the buffer.
unsigned NewCapacity = std::max(
(unsigned)(BufferCapacity ? BufferCapacity * 2 : sizeof(void *) * 2),
(unsigned)(BufferSize + (End - Start)));
char *NewBuffer =
new char[NewCapacity]; // HLSL Change: Use overridable operator new
if (BufferCapacity) {
memcpy(NewBuffer, Buffer, BufferSize);
delete[] Buffer; // HLSL Change: Use overridable operator delete
}
Buffer = NewBuffer;
BufferCapacity = NewCapacity;
}
memcpy(Buffer + BufferSize, Start, End - Start);
BufferSize += End - Start;
}
/// \brief Save a source location to the given buffer.
void SaveSourceLocation(SourceLocation Loc, char *&Buffer, unsigned &BufferSize,
unsigned &BufferCapacity) {
unsigned Raw = Loc.getRawEncoding();
Append(reinterpret_cast<char *>(&Raw),
reinterpret_cast<char *>(&Raw) + sizeof(unsigned), Buffer, BufferSize,
BufferCapacity);
}
/// \brief Save a pointer to the given buffer.
void SavePointer(void *Ptr, char *&Buffer, unsigned &BufferSize,
unsigned &BufferCapacity) {
Append(reinterpret_cast<char *>(&Ptr),
reinterpret_cast<char *>(&Ptr) + sizeof(void *), Buffer, BufferSize,
BufferCapacity);
}
}
NestedNameSpecifierLocBuilder::
NestedNameSpecifierLocBuilder(const NestedNameSpecifierLocBuilder &Other)
: Representation(Other.Representation), Buffer(nullptr),
BufferSize(0), BufferCapacity(0)
{
if (!Other.Buffer)
return;
if (Other.BufferCapacity == 0) {
// Shallow copy is okay.
Buffer = Other.Buffer;
BufferSize = Other.BufferSize;
return;
}
// Deep copy
Append(Other.Buffer, Other.Buffer + Other.BufferSize, Buffer, BufferSize,
BufferCapacity);
}
NestedNameSpecifierLocBuilder &
NestedNameSpecifierLocBuilder::
operator=(const NestedNameSpecifierLocBuilder &Other) {
Representation = Other.Representation;
if (Buffer && Other.Buffer && BufferCapacity >= Other.BufferSize) {
// Re-use our storage.
BufferSize = Other.BufferSize;
memcpy(Buffer, Other.Buffer, BufferSize);
return *this;
}
// Free our storage, if we have any.
if (BufferCapacity) {
    delete[] Buffer; // HLSL Change: match the operator new[] used in Append
BufferCapacity = 0;
}
if (!Other.Buffer) {
// Empty.
Buffer = nullptr;
BufferSize = 0;
return *this;
}
if (Other.BufferCapacity == 0) {
// Shallow copy is okay.
Buffer = Other.Buffer;
BufferSize = Other.BufferSize;
return *this;
}
// Deep copy.
Append(Other.Buffer, Other.Buffer + Other.BufferSize, Buffer, BufferSize,
BufferCapacity);
return *this;
}
void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context,
SourceLocation TemplateKWLoc,
TypeLoc TL,
SourceLocation ColonColonLoc) {
Representation = NestedNameSpecifier::Create(Context, Representation,
TemplateKWLoc.isValid(),
TL.getTypePtr());
// Push source-location info into the buffer.
SavePointer(TL.getOpaqueData(), Buffer, BufferSize, BufferCapacity);
SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
}
void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context,
IdentifierInfo *Identifier,
SourceLocation IdentifierLoc,
SourceLocation ColonColonLoc) {
Representation = NestedNameSpecifier::Create(Context, Representation,
Identifier);
// Push source-location info into the buffer.
SaveSourceLocation(IdentifierLoc, Buffer, BufferSize, BufferCapacity);
SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
}
void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context,
NamespaceDecl *Namespace,
SourceLocation NamespaceLoc,
SourceLocation ColonColonLoc) {
Representation = NestedNameSpecifier::Create(Context, Representation,
Namespace);
// Push source-location info into the buffer.
SaveSourceLocation(NamespaceLoc, Buffer, BufferSize, BufferCapacity);
SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
}
void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context,
NamespaceAliasDecl *Alias,
SourceLocation AliasLoc,
SourceLocation ColonColonLoc) {
Representation = NestedNameSpecifier::Create(Context, Representation, Alias);
// Push source-location info into the buffer.
SaveSourceLocation(AliasLoc, Buffer, BufferSize, BufferCapacity);
SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
}
void NestedNameSpecifierLocBuilder::MakeGlobal(ASTContext &Context,
SourceLocation ColonColonLoc) {
assert(!Representation && "Already have a nested-name-specifier!?");
Representation = NestedNameSpecifier::GlobalSpecifier(Context);
// Push source-location info into the buffer.
SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
}
void NestedNameSpecifierLocBuilder::MakeSuper(ASTContext &Context,
CXXRecordDecl *RD,
SourceLocation SuperLoc,
SourceLocation ColonColonLoc) {
Representation = NestedNameSpecifier::SuperSpecifier(Context, RD);
// Push source-location info into the buffer.
SaveSourceLocation(SuperLoc, Buffer, BufferSize, BufferCapacity);
SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
}
void NestedNameSpecifierLocBuilder::MakeTrivial(ASTContext &Context,
NestedNameSpecifier *Qualifier,
SourceRange R) {
Representation = Qualifier;
// Construct bogus (but well-formed) source information for the
// nested-name-specifier.
BufferSize = 0;
SmallVector<NestedNameSpecifier *, 4> Stack;
for (NestedNameSpecifier *NNS = Qualifier; NNS; NNS = NNS->getPrefix())
Stack.push_back(NNS);
while (!Stack.empty()) {
NestedNameSpecifier *NNS = Stack.pop_back_val();
switch (NNS->getKind()) {
case NestedNameSpecifier::Identifier:
case NestedNameSpecifier::Namespace:
case NestedNameSpecifier::NamespaceAlias:
SaveSourceLocation(R.getBegin(), Buffer, BufferSize, BufferCapacity);
break;
case NestedNameSpecifier::TypeSpec:
case NestedNameSpecifier::TypeSpecWithTemplate: {
TypeSourceInfo *TSInfo
= Context.getTrivialTypeSourceInfo(QualType(NNS->getAsType(), 0),
R.getBegin());
SavePointer(TSInfo->getTypeLoc().getOpaqueData(), Buffer, BufferSize,
BufferCapacity);
break;
}
case NestedNameSpecifier::Global:
case NestedNameSpecifier::Super:
break;
}
// Save the location of the '::'.
SaveSourceLocation(Stack.empty()? R.getEnd() : R.getBegin(),
Buffer, BufferSize, BufferCapacity);
}
}
void NestedNameSpecifierLocBuilder::Adopt(NestedNameSpecifierLoc Other) {
if (BufferCapacity)
    delete[] Buffer; // HLSL Change: match the operator new[] used in Append
if (!Other) {
Representation = nullptr;
BufferSize = 0;
return;
}
// Rather than copying the data (which is wasteful), "adopt" the
// pointer (which points into the ASTContext) but set the capacity to zero to
// indicate that we don't own it.
Representation = Other.getNestedNameSpecifier();
Buffer = static_cast<char *>(Other.getOpaqueData());
BufferSize = Other.getDataLength();
BufferCapacity = 0;
}
NestedNameSpecifierLoc
NestedNameSpecifierLocBuilder::getWithLocInContext(ASTContext &Context) const {
if (!Representation)
return NestedNameSpecifierLoc();
// If we adopted our data pointer from elsewhere in the AST context, there's
// no need to copy the memory.
if (BufferCapacity == 0)
return NestedNameSpecifierLoc(Representation, Buffer);
// FIXME: After copying the source-location information, should we free
// our (temporary) buffer and adopt the ASTContext-allocated memory?
// Doing so would optimize repeated calls to getWithLocInContext().
void *Mem = Context.Allocate(BufferSize, llvm::alignOf<void *>());
memcpy(Mem, Buffer, BufferSize);
return NestedNameSpecifierLoc(Representation, Mem);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/APValue.cpp | //===--- APValue.cpp - Union class for APFloat/APSInt/Complex -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the APValue class.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Type.h"
#include "clang/Basic/Diagnostic.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
namespace {
struct LVBase {
llvm::PointerIntPair<APValue::LValueBase, 1, bool> BaseAndIsOnePastTheEnd;
CharUnits Offset;
unsigned PathLength;
unsigned CallIndex;
};
}
struct APValue::LV : LVBase {
static const unsigned InlinePathSpace =
(DataSize - sizeof(LVBase)) / sizeof(LValuePathEntry);
/// Path - The sequence of base classes, fields and array indices to follow to
/// walk from Base to the subobject. When performing GCC-style folding, there
/// may not be such a path.
union {
LValuePathEntry Path[InlinePathSpace];
LValuePathEntry *PathPtr;
};
LV() { PathLength = (unsigned)-1; }
~LV() { resizePath(0); }
void resizePath(unsigned Length) {
if (Length == PathLength)
return;
if (hasPathPtr())
delete [] PathPtr;
PathLength = Length;
if (hasPathPtr())
PathPtr = new LValuePathEntry[Length];
}
bool hasPath() const { return PathLength != (unsigned)-1; }
bool hasPathPtr() const { return hasPath() && PathLength > InlinePathSpace; }
LValuePathEntry *getPath() { return hasPathPtr() ? PathPtr : Path; }
const LValuePathEntry *getPath() const {
return hasPathPtr() ? PathPtr : Path;
}
};
namespace {
struct MemberPointerBase {
llvm::PointerIntPair<const ValueDecl*, 1, bool> MemberAndIsDerivedMember;
unsigned PathLength;
};
}
struct APValue::MemberPointerData : MemberPointerBase {
static const unsigned InlinePathSpace =
(DataSize - sizeof(MemberPointerBase)) / sizeof(const CXXRecordDecl*);
typedef const CXXRecordDecl *PathElem;
union {
PathElem Path[InlinePathSpace];
PathElem *PathPtr;
};
MemberPointerData() { PathLength = 0; }
~MemberPointerData() { resizePath(0); }
void resizePath(unsigned Length) {
if (Length == PathLength)
return;
if (hasPathPtr())
delete [] PathPtr;
PathLength = Length;
if (hasPathPtr())
PathPtr = new PathElem[Length];
}
bool hasPathPtr() const { return PathLength > InlinePathSpace; }
PathElem *getPath() { return hasPathPtr() ? PathPtr : Path; }
const PathElem *getPath() const {
return hasPathPtr() ? PathPtr : Path;
}
};
// FIXME: Reduce the malloc traffic here.
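// When NumElts != Size, one extra trailing element is allocated to hold the
// array filler for the uninitialized tail (see hasArrayFiller()).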
APValue::Arr::Arr(unsigned NumElts, unsigned Size) :
Elts(new APValue[NumElts + (NumElts != Size ? 1 : 0)]),
NumElts(NumElts), ArrSize(Size) {}
APValue::Arr::~Arr() { delete [] Elts; }
APValue::StructData::StructData(unsigned NumBases, unsigned NumFields) :
Elts(new APValue[NumBases+NumFields]),
NumBases(NumBases), NumFields(NumFields) {}
APValue::StructData::~StructData() {
delete [] Elts;
}
APValue::UnionData::UnionData() : Field(nullptr), Value(new APValue) {}
APValue::UnionData::~UnionData () {
delete Value;
}
APValue::APValue(const APValue &RHS) : Kind(Uninitialized) {
switch (RHS.getKind()) {
case Uninitialized:
break;
case Int:
MakeInt();
setInt(RHS.getInt());
break;
case Float:
MakeFloat();
setFloat(RHS.getFloat());
break;
case Vector:
MakeVector();
setVector(((const Vec *)(const char *)RHS.Data.buffer)->Elts,
RHS.getVectorLength());
break;
case ComplexInt:
MakeComplexInt();
setComplexInt(RHS.getComplexIntReal(), RHS.getComplexIntImag());
break;
case ComplexFloat:
MakeComplexFloat();
setComplexFloat(RHS.getComplexFloatReal(), RHS.getComplexFloatImag());
break;
case LValue:
MakeLValue();
if (RHS.hasLValuePath())
setLValue(RHS.getLValueBase(), RHS.getLValueOffset(), RHS.getLValuePath(),
RHS.isLValueOnePastTheEnd(), RHS.getLValueCallIndex());
else
setLValue(RHS.getLValueBase(), RHS.getLValueOffset(), NoLValuePath(),
RHS.getLValueCallIndex());
break;
case Array:
MakeArray(RHS.getArrayInitializedElts(), RHS.getArraySize());
for (unsigned I = 0, N = RHS.getArrayInitializedElts(); I != N; ++I)
getArrayInitializedElt(I) = RHS.getArrayInitializedElt(I);
if (RHS.hasArrayFiller())
getArrayFiller() = RHS.getArrayFiller();
break;
case Struct:
MakeStruct(RHS.getStructNumBases(), RHS.getStructNumFields());
for (unsigned I = 0, N = RHS.getStructNumBases(); I != N; ++I)
getStructBase(I) = RHS.getStructBase(I);
for (unsigned I = 0, N = RHS.getStructNumFields(); I != N; ++I)
getStructField(I) = RHS.getStructField(I);
break;
case Union:
MakeUnion();
setUnion(RHS.getUnionField(), RHS.getUnionValue());
break;
case MemberPointer:
MakeMemberPointer(RHS.getMemberPointerDecl(),
RHS.isMemberPointerToDerivedMember(),
RHS.getMemberPointerPath());
break;
case AddrLabelDiff:
MakeAddrLabelDiff();
setAddrLabelDiff(RHS.getAddrLabelDiffLHS(), RHS.getAddrLabelDiffRHS());
break;
}
}
void APValue::DestroyDataAndMakeUninit() {
if (Kind == Int)
((APSInt*)(char*)Data.buffer)->~APSInt();
else if (Kind == Float)
((APFloat*)(char*)Data.buffer)->~APFloat();
else if (Kind == Vector)
((Vec*)(char*)Data.buffer)->~Vec();
else if (Kind == ComplexInt)
((ComplexAPSInt*)(char*)Data.buffer)->~ComplexAPSInt();
else if (Kind == ComplexFloat)
((ComplexAPFloat*)(char*)Data.buffer)->~ComplexAPFloat();
else if (Kind == LValue)
((LV*)(char*)Data.buffer)->~LV();
else if (Kind == Array)
((Arr*)(char*)Data.buffer)->~Arr();
else if (Kind == Struct)
((StructData*)(char*)Data.buffer)->~StructData();
else if (Kind == Union)
((UnionData*)(char*)Data.buffer)->~UnionData();
else if (Kind == MemberPointer)
((MemberPointerData*)(char*)Data.buffer)->~MemberPointerData();
else if (Kind == AddrLabelDiff)
((AddrLabelDiffData*)(char*)Data.buffer)->~AddrLabelDiffData();
Kind = Uninitialized;
}
bool APValue::needsCleanup() const {
switch (getKind()) {
case Uninitialized:
case AddrLabelDiff:
return false;
case Struct:
case Union:
case Array:
case Vector:
return true;
case Int:
return getInt().needsCleanup();
case Float:
return getFloat().needsCleanup();
case ComplexFloat:
assert(getComplexFloatImag().needsCleanup() ==
getComplexFloatReal().needsCleanup() &&
"In _Complex float types, real and imaginary values always have the "
"same size.");
return getComplexFloatReal().needsCleanup();
case ComplexInt:
assert(getComplexIntImag().needsCleanup() ==
getComplexIntReal().needsCleanup() &&
"In _Complex int types, real and imaginary values must have the "
"same size.");
return getComplexIntReal().needsCleanup();
case LValue:
return reinterpret_cast<const LV *>(Data.buffer)->hasPathPtr();
case MemberPointer:
return reinterpret_cast<const MemberPointerData *>(Data.buffer)
->hasPathPtr();
}
llvm_unreachable("Unknown APValue kind!");
}
void APValue::swap(APValue &RHS) {
std::swap(Kind, RHS.Kind);
char TmpData[DataSize];
memcpy(TmpData, Data.buffer, DataSize);
memcpy(Data.buffer, RHS.Data.buffer, DataSize);
memcpy(RHS.Data.buffer, TmpData, DataSize);
}
void APValue::dump() const {
dump(llvm::errs());
llvm::errs() << '\n';
}
static double GetApproxValue(const llvm::APFloat &F) {
llvm::APFloat V = F;
bool ignored;
V.convert(llvm::APFloat::IEEEdouble, llvm::APFloat::rmNearestTiesToEven,
&ignored);
return V.convertToDouble();
}
void APValue::dump(raw_ostream &OS) const {
switch (getKind()) {
case Uninitialized:
OS << "Uninitialized";
return;
case Int:
OS << "Int: " << getInt();
return;
case Float:
OS << "Float: " << GetApproxValue(getFloat());
return;
case Vector:
OS << "Vector: ";
getVectorElt(0).dump(OS);
for (unsigned i = 1; i != getVectorLength(); ++i) {
OS << ", ";
getVectorElt(i).dump(OS);
}
return;
case ComplexInt:
OS << "ComplexInt: " << getComplexIntReal() << ", " << getComplexIntImag();
return;
case ComplexFloat:
OS << "ComplexFloat: " << GetApproxValue(getComplexFloatReal())
<< ", " << GetApproxValue(getComplexFloatImag());
return;
case LValue:
OS << "LValue: <todo>";
return;
case Array:
OS << "Array: ";
for (unsigned I = 0, N = getArrayInitializedElts(); I != N; ++I) {
getArrayInitializedElt(I).dump(OS);
if (I != getArraySize() - 1) OS << ", ";
}
if (hasArrayFiller()) {
OS << getArraySize() - getArrayInitializedElts() << " x ";
getArrayFiller().dump(OS);
}
return;
case Struct:
OS << "Struct ";
if (unsigned N = getStructNumBases()) {
OS << " bases: ";
getStructBase(0).dump(OS);
for (unsigned I = 1; I != N; ++I) {
OS << ", ";
getStructBase(I).dump(OS);
}
}
if (unsigned N = getStructNumFields()) {
OS << " fields: ";
getStructField(0).dump(OS);
for (unsigned I = 1; I != N; ++I) {
OS << ", ";
getStructField(I).dump(OS);
}
}
return;
case Union:
OS << "Union: ";
getUnionValue().dump(OS);
return;
case MemberPointer:
OS << "MemberPointer: <todo>";
return;
case AddrLabelDiff:
OS << "AddrLabelDiff: <todo>";
return;
}
llvm_unreachable("Unknown APValue kind!");
}
void APValue::printPretty(raw_ostream &Out, ASTContext &Ctx, QualType Ty) const{
switch (getKind()) {
case APValue::Uninitialized:
Out << "<uninitialized>";
return;
case APValue::Int:
if (Ty->isBooleanType())
Out << (getInt().getBoolValue() ? "true" : "false");
else
Out << getInt();
return;
case APValue::Float:
Out << GetApproxValue(getFloat());
return;
case APValue::Vector: {
Out << '{';
QualType ElemTy = Ty->getAs<VectorType>()->getElementType();
getVectorElt(0).printPretty(Out, Ctx, ElemTy);
for (unsigned i = 1; i != getVectorLength(); ++i) {
Out << ", ";
getVectorElt(i).printPretty(Out, Ctx, ElemTy);
}
Out << '}';
return;
}
case APValue::ComplexInt:
Out << getComplexIntReal() << "+" << getComplexIntImag() << "i";
return;
case APValue::ComplexFloat:
Out << GetApproxValue(getComplexFloatReal()) << "+"
<< GetApproxValue(getComplexFloatImag()) << "i";
return;
case APValue::LValue: {
LValueBase Base = getLValueBase();
if (!Base) {
Out << "0";
return;
}
bool IsReference = Ty->isReferenceType();
QualType InnerTy
= IsReference ? Ty.getNonReferenceType() : Ty->getPointeeType();
if (InnerTy.isNull())
InnerTy = Ty;
if (!hasLValuePath()) {
// No lvalue path: just print the offset.
CharUnits O = getLValueOffset();
CharUnits S = Ctx.getTypeSizeInChars(InnerTy);
if (!O.isZero()) {
if (IsReference)
Out << "*(";
if (O % S) {
Out << "(char*)";
S = CharUnits::One();
}
Out << '&';
} else if (!IsReference)
Out << '&';
if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>())
Out << *VD;
else {
assert(Base.get<const Expr *>() != nullptr &&
"Expecting non-null Expr");
Base.get<const Expr*>()->printPretty(Out, nullptr,
Ctx.getPrintingPolicy());
}
if (!O.isZero()) {
Out << " + " << (O / S);
if (IsReference)
Out << ')';
}
return;
}
// We have an lvalue path. Print it out nicely.
if (!IsReference)
Out << '&';
else if (isLValueOnePastTheEnd())
Out << "*(&";
QualType ElemTy;
if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>()) {
Out << *VD;
ElemTy = VD->getType();
} else {
const Expr *E = Base.get<const Expr*>();
assert(E != nullptr && "Expecting non-null Expr");
E->printPretty(Out, nullptr, Ctx.getPrintingPolicy());
ElemTy = E->getType();
}
ArrayRef<LValuePathEntry> Path = getLValuePath();
const CXXRecordDecl *CastToBase = nullptr;
for (unsigned I = 0, N = Path.size(); I != N; ++I) {
if (ElemTy->getAs<RecordType>()) {
// The lvalue refers to a class type, so the next path entry is a base
// or member.
const Decl *BaseOrMember =
BaseOrMemberType::getFromOpaqueValue(Path[I].BaseOrMember).getPointer();
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(BaseOrMember)) {
CastToBase = RD;
ElemTy = Ctx.getRecordType(RD);
} else {
const ValueDecl *VD = cast<ValueDecl>(BaseOrMember);
Out << ".";
if (CastToBase)
Out << *CastToBase << "::";
Out << *VD;
ElemTy = VD->getType();
}
} else {
// The lvalue must refer to an array.
Out << '[' << Path[I].ArrayIndex << ']';
ElemTy = Ctx.getAsArrayType(ElemTy)->getElementType();
}
}
// Handle formatting of one-past-the-end lvalues.
if (isLValueOnePastTheEnd()) {
// FIXME: If CastToBase is non-0, we should prefix the output with
// "(CastToBase*)".
Out << " + 1";
if (IsReference)
Out << ')';
}
return;
}
case APValue::Array: {
const ArrayType *AT = Ctx.getAsArrayType(Ty);
QualType ElemTy = AT->getElementType();
Out << '{';
if (unsigned N = getArrayInitializedElts()) {
getArrayInitializedElt(0).printPretty(Out, Ctx, ElemTy);
for (unsigned I = 1; I != N; ++I) {
Out << ", ";
if (I == 10) {
// Avoid printing out the entire contents of large arrays.
Out << "...";
break;
}
getArrayInitializedElt(I).printPretty(Out, Ctx, ElemTy);
}
}
Out << '}';
return;
}
case APValue::Struct: {
Out << '{';
const RecordDecl *RD = Ty->getAs<RecordType>()->getDecl();
bool First = true;
if (unsigned N = getStructNumBases()) {
const CXXRecordDecl *CD = cast<CXXRecordDecl>(RD);
CXXRecordDecl::base_class_const_iterator BI = CD->bases_begin();
for (unsigned I = 0; I != N; ++I, ++BI) {
assert(BI != CD->bases_end());
if (!First)
Out << ", ";
getStructBase(I).printPretty(Out, Ctx, BI->getType());
First = false;
}
}
    for (const auto *FI : RD->fields()) {
      // Skip unnamed bit-fields before emitting the separator, so a skipped
      // field does not leave a stray ", " behind.
      if (FI->isUnnamedBitfield()) continue;
      if (!First)
        Out << ", ";
getStructField(FI->getFieldIndex()).
printPretty(Out, Ctx, FI->getType());
First = false;
}
Out << '}';
return;
}
case APValue::Union:
Out << '{';
if (const FieldDecl *FD = getUnionField()) {
Out << "." << *FD << " = ";
getUnionValue().printPretty(Out, Ctx, FD->getType());
}
Out << '}';
return;
case APValue::MemberPointer:
// FIXME: This is not enough to unambiguously identify the member in a
// multiple-inheritance scenario.
if (const ValueDecl *VD = getMemberPointerDecl()) {
Out << '&' << *cast<CXXRecordDecl>(VD->getDeclContext()) << "::" << *VD;
return;
}
Out << "0";
return;
case APValue::AddrLabelDiff:
Out << "&&" << getAddrLabelDiffLHS()->getLabel()->getName();
Out << " - ";
Out << "&&" << getAddrLabelDiffRHS()->getLabel()->getName();
return;
}
llvm_unreachable("Unknown APValue kind!");
}
std::string APValue::getAsString(ASTContext &Ctx, QualType Ty) const {
std::string Result;
llvm::raw_string_ostream Out(Result);
printPretty(Out, Ctx, Ty);
Out.flush();
return Result;
}
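// Illustrative printPretty/getAsString results (a sketch, not exhaustive):
//   Int 1 with type 'bool'        -> "true"
//   ComplexInt (1, 2)             -> "1+2i"
//   Vector {1, 2}                 -> "{1, 2}"
//   Null-base LValue of 'int *'   -> "0"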
const APValue::LValueBase APValue::getLValueBase() const {
assert(isLValue() && "Invalid accessor");
return ((const LV*)(const void*)Data.buffer)->BaseAndIsOnePastTheEnd.getPointer();
}
bool APValue::isLValueOnePastTheEnd() const {
assert(isLValue() && "Invalid accessor");
return ((const LV*)(const void*)Data.buffer)->BaseAndIsOnePastTheEnd.getInt();
}
CharUnits &APValue::getLValueOffset() {
assert(isLValue() && "Invalid accessor");
return ((LV*)(void*)Data.buffer)->Offset;
}
bool APValue::hasLValuePath() const {
assert(isLValue() && "Invalid accessor");
return ((const LV*)(const char*)Data.buffer)->hasPath();
}
ArrayRef<APValue::LValuePathEntry> APValue::getLValuePath() const {
assert(isLValue() && hasLValuePath() && "Invalid accessor");
const LV &LVal = *((const LV*)(const char*)Data.buffer);
return llvm::makeArrayRef(LVal.getPath(), LVal.PathLength);
}
unsigned APValue::getLValueCallIndex() const {
assert(isLValue() && "Invalid accessor");
return ((const LV*)(const char*)Data.buffer)->CallIndex;
}
void APValue::setLValue(LValueBase B, const CharUnits &O, NoLValuePath,
unsigned CallIndex) {
assert(isLValue() && "Invalid accessor");
LV &LVal = *((LV*)(char*)Data.buffer);
LVal.BaseAndIsOnePastTheEnd.setPointer(B);
LVal.BaseAndIsOnePastTheEnd.setInt(false);
LVal.Offset = O;
LVal.CallIndex = CallIndex;
LVal.resizePath((unsigned)-1);
}
void APValue::setLValue(LValueBase B, const CharUnits &O,
ArrayRef<LValuePathEntry> Path, bool IsOnePastTheEnd,
unsigned CallIndex) {
assert(isLValue() && "Invalid accessor");
LV &LVal = *((LV*)(char*)Data.buffer);
LVal.BaseAndIsOnePastTheEnd.setPointer(B);
LVal.BaseAndIsOnePastTheEnd.setInt(IsOnePastTheEnd);
LVal.Offset = O;
LVal.CallIndex = CallIndex;
LVal.resizePath(Path.size());
memcpy(LVal.getPath(), Path.data(), Path.size() * sizeof(LValuePathEntry));
}
const ValueDecl *APValue::getMemberPointerDecl() const {
assert(isMemberPointer() && "Invalid accessor");
const MemberPointerData &MPD =
*((const MemberPointerData *)(const char *)Data.buffer);
return MPD.MemberAndIsDerivedMember.getPointer();
}
bool APValue::isMemberPointerToDerivedMember() const {
assert(isMemberPointer() && "Invalid accessor");
const MemberPointerData &MPD =
*((const MemberPointerData *)(const char *)Data.buffer);
return MPD.MemberAndIsDerivedMember.getInt();
}
ArrayRef<const CXXRecordDecl*> APValue::getMemberPointerPath() const {
assert(isMemberPointer() && "Invalid accessor");
const MemberPointerData &MPD =
*((const MemberPointerData *)(const char *)Data.buffer);
return llvm::makeArrayRef(MPD.getPath(), MPD.PathLength);
}
void APValue::MakeLValue() {
assert(isUninit() && "Bad state change");
static_assert(sizeof(LV) <= DataSize, "LV too big");
new ((void*)(char*)Data.buffer) LV();
Kind = LValue;
}
void APValue::MakeArray(unsigned InitElts, unsigned Size) {
assert(isUninit() && "Bad state change");
new ((void*)(char*)Data.buffer) Arr(InitElts, Size);
Kind = Array;
}
void APValue::MakeMemberPointer(const ValueDecl *Member, bool IsDerivedMember,
ArrayRef<const CXXRecordDecl*> Path) {
assert(isUninit() && "Bad state change");
MemberPointerData *MPD = new ((void*)(char*)Data.buffer) MemberPointerData;
Kind = MemberPointer;
MPD->MemberAndIsDerivedMember.setPointer(Member);
MPD->MemberAndIsDerivedMember.setInt(IsDerivedMember);
MPD->resizePath(Path.size());
memcpy(MPD->getPath(), Path.data(), Path.size()*sizeof(const CXXRecordDecl*));
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/Decl.cpp | //===--- Decl.cpp - Declaration AST Node Implementation -------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Decl subclasses.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/Decl.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
using namespace clang;
Decl *clang::getPrimaryMergedDecl(Decl *D) {
return D->getASTContext().getPrimaryMergedDecl(D);
}
// Defined here so that it can be inlined into its direct callers.
bool Decl::isOutOfLine() const {
return !getLexicalDeclContext()->Equals(getDeclContext());
}
TranslationUnitDecl::TranslationUnitDecl(ASTContext &ctx)
: Decl(TranslationUnit, nullptr, SourceLocation()),
DeclContext(TranslationUnit), Ctx(ctx), AnonymousNamespace(nullptr) {
Hidden = Ctx.getLangOpts().ModulesLocalVisibility;
}
//===----------------------------------------------------------------------===//
// NamedDecl Implementation
//===----------------------------------------------------------------------===//
// Visibility rules aren't rigorously externally specified, but here
// are the basic principles behind what we implement:
//
// 1. An explicit visibility attribute is generally a direct expression
// of the user's intent and should be honored. Only the innermost
// visibility attribute applies. If no visibility attribute applies,
// global visibility settings are considered.
//
// 2. There is one caveat to the above: on or in a template pattern,
// an explicit visibility attribute is just a default rule, and
// visibility can be decreased by the visibility of template
// arguments. But this, too, has an exception: an attribute on an
// explicit specialization or instantiation causes all the visibility
// restrictions of the template arguments to be ignored.
//
// 3. A variable that does not otherwise have explicit visibility can
// be restricted by the visibility of its type.
//
// 4. A visibility restriction is explicit if it comes from an
// attribute (or something like it), not a global visibility setting.
// When emitting a reference to an external symbol, visibility
// restrictions are ignored unless they are explicit.
//
// 5. When computing the visibility of a non-type, including a
// non-type member of a class, only non-type visibility restrictions
// are considered: the 'visibility' attribute, global value-visibility
// settings, and a few special cases like __private_extern.
//
// 6. When computing the visibility of a type, including a type member
// of a class, only type visibility restrictions are considered:
// the 'type_visibility' attribute and global type-visibility settings.
// However, a 'visibility' attribute counts as a 'type_visibility'
// attribute on any declaration that only has the former.
//
// The visibility of a "secondary" entity, like a template argument,
// is computed using the kind of that entity, not the kind of the
// primary entity for which we are computing visibility. For example,
// the visibility of a specialization of either of these templates:
// template <class T, bool (&compare)(T, X)> bool has_match(list<T>, X);
// template <class T, bool (&compare)(T, X)> class matcher;
// is restricted according to the type visibility of the argument 'T',
// the type visibility of 'bool(&)(T,X)', and the value visibility of
// the argument function 'compare'. That 'has_match' is a value
// and 'matcher' is a type only matters when looking for attributes
// and settings from the immediate context.
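// For example (an illustrative reading of rule 2): given
//   template <class T> struct __attribute__((visibility("default"))) S {};
//   class __attribute__((visibility("hidden"))) H {};
// an implicit instantiation S<H> ends up hidden, because the visibility of
// the template argument H may reduce the default visibility on the pattern.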
const unsigned IgnoreExplicitVisibilityBit = 2;
const unsigned IgnoreAllVisibilityBit = 4;
/// Kinds of LV computation. The linkage side of the computation is
/// always the same, but different things can change how visibility is
/// computed.
enum LVComputationKind {
/// Do an LV computation for, ultimately, a type.
/// Visibility may be restricted by type visibility settings and
/// the visibility of template arguments.
LVForType = NamedDecl::VisibilityForType,
/// Do an LV computation for, ultimately, a non-type declaration.
/// Visibility may be restricted by value visibility settings and
/// the visibility of template arguments.
LVForValue = NamedDecl::VisibilityForValue,
/// Do an LV computation for, ultimately, a type that already has
/// some sort of explicit visibility. Visibility may only be
/// restricted by the visibility of template arguments.
LVForExplicitType = (LVForType | IgnoreExplicitVisibilityBit),
/// Do an LV computation for, ultimately, a non-type declaration
/// that already has some sort of explicit visibility. Visibility
/// may only be restricted by the visibility of template arguments.
LVForExplicitValue = (LVForValue | IgnoreExplicitVisibilityBit),
/// Do an LV computation when we only care about the linkage.
LVForLinkageOnly =
LVForValue | IgnoreExplicitVisibilityBit | IgnoreAllVisibilityBit
};
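// The enumerators double as a bitset: the low bit records whether we are
// ultimately computing visibility for a type or a value, and the Ignore*
// bits above layer additional restrictions on top of that.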
/// Does this computation kind permit us to consider additional
/// visibility settings from attributes and the like?
static bool hasExplicitVisibilityAlready(LVComputationKind computation) {
return ((unsigned(computation) & IgnoreExplicitVisibilityBit) != 0);
}
/// Given an LVComputationKind, return one of the same type/value sort
/// that records that it already has explicit visibility.
static LVComputationKind
withExplicitVisibilityAlready(LVComputationKind oldKind) {
LVComputationKind newKind =
static_cast<LVComputationKind>(unsigned(oldKind) |
IgnoreExplicitVisibilityBit);
assert(oldKind != LVForType || newKind == LVForExplicitType);
assert(oldKind != LVForValue || newKind == LVForExplicitValue);
assert(oldKind != LVForExplicitType || newKind == LVForExplicitType);
assert(oldKind != LVForExplicitValue || newKind == LVForExplicitValue);
return newKind;
}
static Optional<Visibility> getExplicitVisibility(const NamedDecl *D,
LVComputationKind kind) {
assert(!hasExplicitVisibilityAlready(kind) &&
"asking for explicit visibility when we shouldn't be");
return D->getExplicitVisibility((NamedDecl::ExplicitVisibilityKind) kind);
}
/// Is the given declaration a "type" or a "value" for the purposes of
/// visibility computation?
static bool usesTypeVisibility(const NamedDecl *D) {
return isa<TypeDecl>(D) ||
isa<ClassTemplateDecl>(D) ||
isa<ObjCInterfaceDecl>(D);
}
/// Does the given declaration have member specialization information,
/// and if so, is it an explicit specialization?
template <class T> static typename
std::enable_if<!std::is_base_of<RedeclarableTemplateDecl, T>::value, bool>::type
isExplicitMemberSpecialization(const T *D) {
if (const MemberSpecializationInfo *member =
D->getMemberSpecializationInfo()) {
return member->isExplicitSpecialization();
}
return false;
}
/// For templates, this question is easier: a member template can't be
/// explicitly instantiated, so there's a single bit indicating whether
/// or not this is an explicit member specialization.
static bool isExplicitMemberSpecialization(const RedeclarableTemplateDecl *D) {
return D->isMemberSpecialization();
}
/// Given a visibility attribute, return the explicit visibility
/// associated with it.
template <class T>
static Visibility getVisibilityFromAttr(const T *attr) {
switch (attr->getVisibility()) {
case T::Default:
return DefaultVisibility;
case T::Hidden:
return HiddenVisibility;
case T::Protected:
return ProtectedVisibility;
}
llvm_unreachable("bad visibility kind");
}
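// Illustration (assumed user code): for
//   __attribute__((visibility("hidden"))) void f();
// the VisibilityAttr's T::Hidden maps to HiddenVisibility; the same
// template serves TypeVisibilityAttr, which shares the enumerator names.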
/// Return the explicit visibility of the given declaration.
static Optional<Visibility> getVisibilityOf(const NamedDecl *D,
NamedDecl::ExplicitVisibilityKind kind) {
// If we're ultimately computing the visibility of a type, look for
// a 'type_visibility' attribute before looking for 'visibility'.
if (kind == NamedDecl::VisibilityForType) {
if (const TypeVisibilityAttr *A = D->getAttr<TypeVisibilityAttr>()) {
return getVisibilityFromAttr(A);
}
}
// If this declaration has an explicit visibility attribute, use it.
if (const VisibilityAttr *A = D->getAttr<VisibilityAttr>()) {
return getVisibilityFromAttr(A);
}
// If we're on Mac OS X, an 'availability' for Mac OS X attribute
// implies visibility(default).
if (D->getASTContext().getTargetInfo().getTriple().isOSDarwin()) {
for (const auto *A : D->specific_attrs<AvailabilityAttr>())
if (A->getPlatform()->getName().equals("macosx"))
return DefaultVisibility;
}
return None;
}
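// Illustration (assumed user code): when computing type visibility for
//   struct __attribute__((type_visibility("default"),
//                         visibility("hidden"))) S;
// the type_visibility attribute wins, so S's explicit type visibility is
// default, while a value computation on the same decl would see hidden.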
static LinkageInfo
getLVForType(const Type &T, LVComputationKind computation) {
if (computation == LVForLinkageOnly)
return LinkageInfo(T.getLinkage(), DefaultVisibility, true);
return T.getLinkageAndVisibility();
}
/// \brief Get the most restrictive linkage for the types in the given
/// template parameter list. For visibility purposes, template
/// parameters are part of the signature of a template.
static LinkageInfo
getLVForTemplateParameterList(const TemplateParameterList *Params,
LVComputationKind computation) {
LinkageInfo LV;
for (const NamedDecl *P : *Params) {
// Template type parameters are the most common and never
// contribute to visibility, pack or not.
if (isa<TemplateTypeParmDecl>(P))
continue;
// Non-type template parameters can be restricted by the value type, e.g.
// template <enum X> class A { ... };
// We have to be careful here, though, because we can be dealing with
// dependent types.
if (const NonTypeTemplateParmDecl *NTTP =
dyn_cast<NonTypeTemplateParmDecl>(P)) {
// Handle the non-pack case first.
if (!NTTP->isExpandedParameterPack()) {
if (!NTTP->getType()->isDependentType()) {
LV.merge(getLVForType(*NTTP->getType(), computation));
}
continue;
}
// Look at all the types in an expanded pack.
for (unsigned i = 0, n = NTTP->getNumExpansionTypes(); i != n; ++i) {
QualType type = NTTP->getExpansionType(i);
if (!type->isDependentType())
LV.merge(type->getLinkageAndVisibility());
}
continue;
}
// Template template parameters can be restricted by their
// template parameters, recursively.
const TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(P);
// Handle the non-pack case first.
if (!TTP->isExpandedParameterPack()) {
LV.merge(getLVForTemplateParameterList(TTP->getTemplateParameters(),
computation));
continue;
}
// Look at all expansions in an expanded pack.
for (unsigned i = 0, n = TTP->getNumExpansionTemplateParameters();
i != n; ++i) {
LV.merge(getLVForTemplateParameterList(
TTP->getExpansionTemplateParameters(i), computation));
}
}
return LV;
}
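// Illustration (assumed user code): in
//   namespace { enum E { e0 }; }
//   template <E v> struct A {};
// the non-type parameter's type E has internal linkage, so every A<...>
// is restricted through the merge above before any argument is seen.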
/// getLVForDecl - Get the linkage and visibility for the given declaration.
static LinkageInfo getLVForDecl(const NamedDecl *D,
LVComputationKind computation);
static const Decl *getOutermostFuncOrBlockContext(const Decl *D) {
const Decl *Ret = nullptr;
const DeclContext *DC = D->getDeclContext();
while (DC->getDeclKind() != Decl::TranslationUnit) {
if (isa<FunctionDecl>(DC) || isa<BlockDecl>(DC))
Ret = cast<Decl>(DC);
DC = DC->getParent();
}
return Ret;
}
/// \brief Get the most restrictive linkage for the types and
/// declarations in the given template argument list.
///
/// Note that the computation kind is passed through unchanged: the
/// visibility of template arguments is always honored the same way,
/// regardless of the kind of entity being specialized.
static LinkageInfo getLVForTemplateArgumentList(ArrayRef<TemplateArgument> Args,
LVComputationKind computation) {
LinkageInfo LV;
for (const TemplateArgument &Arg : Args) {
switch (Arg.getKind()) {
case TemplateArgument::Null:
case TemplateArgument::Integral:
case TemplateArgument::Expression:
continue;
case TemplateArgument::Type:
LV.merge(getLVForType(*Arg.getAsType(), computation));
continue;
case TemplateArgument::Declaration:
if (NamedDecl *ND = dyn_cast<NamedDecl>(Arg.getAsDecl())) {
assert(!usesTypeVisibility(ND));
LV.merge(getLVForDecl(ND, computation));
}
continue;
case TemplateArgument::NullPtr:
LV.merge(Arg.getNullPtrType()->getLinkageAndVisibility());
continue;
case TemplateArgument::Template:
case TemplateArgument::TemplateExpansion:
if (TemplateDecl *Template =
Arg.getAsTemplateOrTemplatePattern().getAsTemplateDecl())
LV.merge(getLVForDecl(Template, computation));
continue;
case TemplateArgument::Pack:
LV.merge(getLVForTemplateArgumentList(Arg.getPackAsArray(), computation));
continue;
}
llvm_unreachable("bad template argument kind");
}
return LV;
}
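// Illustration (assumed user code): in
//   namespace { struct Hidden {}; }
//   template <class T> struct W {};
//   W<Hidden> w;
// the Type argument contributes Hidden's internal linkage, so W<Hidden>
// cannot end up more widely visible than its arguments.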
static LinkageInfo
getLVForTemplateArgumentList(const TemplateArgumentList &TArgs,
LVComputationKind computation) {
return getLVForTemplateArgumentList(TArgs.asArray(), computation);
}
static bool shouldConsiderTemplateVisibility(const FunctionDecl *fn,
const FunctionTemplateSpecializationInfo *specInfo) {
// Include visibility from the template parameters and arguments
// only if this is not an explicit instantiation or specialization
// with direct explicit visibility. (Implicit instantiations won't
// have a direct attribute.)
if (!specInfo->isExplicitInstantiationOrSpecialization())
return true;
return !fn->hasAttr<VisibilityAttr>();
}
/// Merge in template-related linkage and visibility for the given
/// function template specialization.
///
/// We don't need a computation kind here because we can assume
/// LVForValue.
///
/// \param[in,out] LV the linkage and visibility being accumulated for \p fn
static void
mergeTemplateLV(LinkageInfo &LV, const FunctionDecl *fn,
const FunctionTemplateSpecializationInfo *specInfo,
LVComputationKind computation) {
bool considerVisibility =
shouldConsiderTemplateVisibility(fn, specInfo);
// Merge information from the template parameters.
FunctionTemplateDecl *temp = specInfo->getTemplate();
LinkageInfo tempLV =
getLVForTemplateParameterList(temp->getTemplateParameters(), computation);
LV.mergeMaybeWithVisibility(tempLV, considerVisibility);
// Merge information from the template arguments.
const TemplateArgumentList &templateArgs = *specInfo->TemplateArguments;
LinkageInfo argsLV = getLVForTemplateArgumentList(templateArgs, computation);
LV.mergeMaybeWithVisibility(argsLV, considerVisibility);
}
/// Does the given declaration have a direct visibility attribute
/// that would match the given rules?
static bool hasDirectVisibilityAttribute(const NamedDecl *D,
LVComputationKind computation) {
switch (computation) {
case LVForType:
case LVForExplicitType:
if (D->hasAttr<TypeVisibilityAttr>())
return true;
LLVM_FALLTHROUGH; // HLSL Change
case LVForValue:
case LVForExplicitValue:
if (D->hasAttr<VisibilityAttr>())
return true;
return false;
case LVForLinkageOnly:
return false;
}
llvm_unreachable("bad visibility computation kind");
}
/// Should we consider visibility associated with the template
/// arguments and parameters of the given class template specialization?
static bool shouldConsiderTemplateVisibility(
const ClassTemplateSpecializationDecl *spec,
LVComputationKind computation) {
// Include visibility from the template parameters and arguments
// only if this is not an explicit instantiation or specialization
// with direct explicit visibility (and note that implicit
// instantiations won't have a direct attribute).
//
// Furthermore, we want to ignore template parameters and arguments
// for an explicit specialization when computing the visibility of a
// member thereof with explicit visibility.
//
// This is a bit complex; let's unpack it.
//
// An explicit class specialization is an independent, top-level
// declaration. As such, if it or any of its members has an
// explicit visibility attribute, that must directly express the
// user's intent, and we should honor it. The same logic applies to
// an explicit instantiation of a member of such a thing.
// Fast path: if this is not an explicit instantiation or
// specialization, we always want to consider template-related
// visibility restrictions.
if (!spec->isExplicitInstantiationOrSpecialization())
return true;
// This is the 'member thereof' check.
if (spec->isExplicitSpecialization() &&
hasExplicitVisibilityAlready(computation))
return false;
return !hasDirectVisibilityAttribute(spec, computation);
}
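// Illustration (assumed user code): for
//   template <class T> struct W {};
//   template struct __attribute__((visibility("default"))) W<int>;
// the explicit instantiation carries a direct attribute, so template
// parameter and argument visibility is ignored for W<int> itself.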
/// Merge in template-related linkage and visibility for the given
/// class template specialization.
static void mergeTemplateLV(LinkageInfo &LV,
const ClassTemplateSpecializationDecl *spec,
LVComputationKind computation) {
bool considerVisibility = shouldConsiderTemplateVisibility(spec, computation);
// Merge information from the template parameters, but ignore
// visibility if we're only considering template arguments.
ClassTemplateDecl *temp = spec->getSpecializedTemplate();
LinkageInfo tempLV =
getLVForTemplateParameterList(temp->getTemplateParameters(), computation);
LV.mergeMaybeWithVisibility(tempLV,
considerVisibility && !hasExplicitVisibilityAlready(computation));
// Merge information from the template arguments. We ignore
// template-argument visibility if we've got an explicit
// instantiation with a visibility attribute.
const TemplateArgumentList &templateArgs = spec->getTemplateArgs();
LinkageInfo argsLV = getLVForTemplateArgumentList(templateArgs, computation);
if (considerVisibility)
LV.mergeVisibility(argsLV);
LV.mergeExternalVisibility(argsLV);
}
/// Should we consider visibility associated with the template
/// arguments and parameters of the given variable template
/// specialization? As usual, follow class template specialization
/// logic up to initialization.
static bool shouldConsiderTemplateVisibility(
const VarTemplateSpecializationDecl *spec,
LVComputationKind computation) {
// Include visibility from the template parameters and arguments
// only if this is not an explicit instantiation or specialization
// with direct explicit visibility (and note that implicit
// instantiations won't have a direct attribute).
if (!spec->isExplicitInstantiationOrSpecialization())
return true;
// An explicit variable specialization is an independent, top-level
// declaration. As such, if it has an explicit visibility attribute,
// that must directly express the user's intent, and we should honor
// it.
if (spec->isExplicitSpecialization() &&
hasExplicitVisibilityAlready(computation))
return false;
return !hasDirectVisibilityAttribute(spec, computation);
}
/// Merge in template-related linkage and visibility for the given
/// variable template specialization. As usual, follow class template
/// specialization logic up to initialization.
static void mergeTemplateLV(LinkageInfo &LV,
const VarTemplateSpecializationDecl *spec,
LVComputationKind computation) {
bool considerVisibility = shouldConsiderTemplateVisibility(spec, computation);
// Merge information from the template parameters, but ignore
// visibility if we're only considering template arguments.
VarTemplateDecl *temp = spec->getSpecializedTemplate();
LinkageInfo tempLV =
getLVForTemplateParameterList(temp->getTemplateParameters(), computation);
LV.mergeMaybeWithVisibility(tempLV,
considerVisibility && !hasExplicitVisibilityAlready(computation));
// Merge information from the template arguments. We ignore
// template-argument visibility if we've got an explicit
// instantiation with a visibility attribute.
const TemplateArgumentList &templateArgs = spec->getTemplateArgs();
LinkageInfo argsLV = getLVForTemplateArgumentList(templateArgs, computation);
if (considerVisibility)
LV.mergeVisibility(argsLV);
LV.mergeExternalVisibility(argsLV);
}
static bool useInlineVisibilityHidden(const NamedDecl *D) {
#if 1 // HLSL Change Starts
return false;
#else
// FIXME: we should warn if -fvisibility-inlines-hidden is used with C.
const LangOptions &Opts = D->getASTContext().getLangOpts();
if (!Opts.CPlusPlus || !Opts.InlineVisibilityHidden)
return false;
const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
if (!FD)
return false;
TemplateSpecializationKind TSK = TSK_Undeclared;
if (FunctionTemplateSpecializationInfo *spec
= FD->getTemplateSpecializationInfo()) {
TSK = spec->getTemplateSpecializationKind();
} else if (MemberSpecializationInfo *MSI =
FD->getMemberSpecializationInfo()) {
TSK = MSI->getTemplateSpecializationKind();
}
const FunctionDecl *Def = nullptr;
// InlineVisibilityHidden only applies to definitions, and
// isInlined() only gives meaningful answers on definitions
// anyway.
return TSK != TSK_ExplicitInstantiationDeclaration &&
TSK != TSK_ExplicitInstantiationDefinition &&
FD->hasBody(Def) && Def->isInlined() && !Def->hasAttr<GNUInlineAttr>();
#endif // HLSL Change Ends
}
template <typename T> static bool isFirstInExternCContext(T *D) {
const T *First = D->getFirstDecl();
return First->isInExternCContext();
}
static bool isSingleLineLanguageLinkage(const Decl &D) {
if (const LinkageSpecDecl *SD = dyn_cast<LinkageSpecDecl>(D.getDeclContext()))
if (!SD->hasBraces())
return true;
return false;
}
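// Illustration (assumed user code):
//   extern "C" int a;      // unbraced linkage-spec: returns true
//   extern "C" { int b; }  // braced form: hasBraces(), returns false
// The unbraced form behaves like an 'extern' specifier, which matters for
// the const-implies-internal-linkage rule below.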
static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D,
LVComputationKind computation) {
assert(D->getDeclContext()->getRedeclContext()->isFileContext() &&
"Not a name having namespace scope");
ASTContext &Context = D->getASTContext();
// C++ [basic.link]p3:
// A name having namespace scope (3.3.6) has internal linkage if it
// is the name of
// - an object, reference, function or function template that is
// explicitly declared static; or,
// (This bullet corresponds to C99 6.2.2p3.)
if (const VarDecl *Var = dyn_cast<VarDecl>(D)) {
// Explicitly declared static.
if (Var->getStorageClass() == SC_Static)
return LinkageInfo::internal();
// - a non-volatile object or reference that is explicitly declared const
// or constexpr and neither explicitly declared extern nor previously
// declared to have external linkage; or (there is no equivalent in C99)
if (Context.getLangOpts().CPlusPlus &&
Var->getType().isConstQualified() &&
!Context.getLangOpts().HLSL && // HLSL Change - initializers on global 'const' variables are ignored for HLSL, so do not apply this rule.
!Var->getType().isVolatileQualified()) {
const VarDecl *PrevVar = Var->getPreviousDecl();
if (PrevVar)
return getLVForDecl(PrevVar, computation);
if (Var->getStorageClass() != SC_Extern &&
Var->getStorageClass() != SC_PrivateExtern &&
!isSingleLineLanguageLinkage(*Var))
return LinkageInfo::internal();
}
for (const VarDecl *PrevVar = Var->getPreviousDecl(); PrevVar;
PrevVar = PrevVar->getPreviousDecl()) {
if (PrevVar->getStorageClass() == SC_PrivateExtern &&
Var->getStorageClass() == SC_None)
return PrevVar->getLinkageAndVisibility();
// Explicitly declared static.
if (PrevVar->getStorageClass() == SC_Static)
return LinkageInfo::internal();
}
} else if (const FunctionDecl *Function = D->getAsFunction()) {
// C++ [temp]p4:
// A non-member function template can have internal linkage; any
// other template name shall have external linkage.
// Explicitly declared static.
if (Function->getCanonicalDecl()->getStorageClass() == SC_Static)
return LinkageInfo(InternalLinkage, DefaultVisibility, false);
} else if (const auto *IFD = dyn_cast<IndirectFieldDecl>(D)) {
// - a data member of an anonymous union.
const VarDecl *VD = IFD->getVarDecl();
assert(VD && "Expected a VarDecl in this IndirectFieldDecl!");
return getLVForNamespaceScopeDecl(VD, computation);
}
assert(!isa<FieldDecl>(D) && "Didn't expect a FieldDecl!");
if (D->isInAnonymousNamespace()) {
const VarDecl *Var = dyn_cast<VarDecl>(D);
const FunctionDecl *Func = dyn_cast<FunctionDecl>(D);
if ((!Var || !isFirstInExternCContext(Var)) &&
(!Func || !isFirstInExternCContext(Func)))
return LinkageInfo::uniqueExternal();
}
// Set up the defaults.
// C99 6.2.2p5:
// If the declaration of an identifier for an object has file
// scope and no storage-class specifier, its linkage is
// external.
LinkageInfo LV;
if (!hasExplicitVisibilityAlready(computation)) {
if (Optional<Visibility> Vis = getExplicitVisibility(D, computation)) {
LV.mergeVisibility(*Vis, true);
} else {
// If we're declared in a namespace with a visibility attribute,
// use that namespace's visibility, and it still counts as explicit.
for (const DeclContext *DC = D->getDeclContext();
!isa<TranslationUnitDecl>(DC);
DC = DC->getParent()) {
const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
if (!ND) continue;
if (Optional<Visibility> Vis = getExplicitVisibility(ND, computation)) {
LV.mergeVisibility(*Vis, true);
break;
}
}
}
// Add in global settings if the above didn't give us direct visibility.
if (!LV.isVisibilityExplicit()) {
// Use global type/value visibility as appropriate.
Visibility globalVisibility;
if (computation == LVForValue) {
globalVisibility = Context.getLangOpts().getValueVisibilityMode();
} else {
assert(computation == LVForType);
globalVisibility = Context.getLangOpts().getTypeVisibilityMode();
}
LV.mergeVisibility(globalVisibility, /*explicit*/ false);
// If we're paying attention to global visibility, apply
// -finline-visibility-hidden if this is an inline method.
if (useInlineVisibilityHidden(D))
LV.mergeVisibility(HiddenVisibility, true);
}
}
// C++ [basic.link]p4:
// A name having namespace scope has external linkage if it is the
// name of
//
// - an object or reference, unless it has internal linkage; or
if (const VarDecl *Var = dyn_cast<VarDecl>(D)) {
// GCC applies the following optimization to variables and static
// data members, but not to functions:
//
// Modify the variable's LV by the LV of its type unless this is
// C or extern "C". This follows from [basic.link]p9:
// A type without linkage shall not be used as the type of a
// variable or function with external linkage unless
// - the entity has C language linkage, or
// - the entity is declared within an unnamed namespace, or
// - the entity is not used or is defined in the same
// translation unit.
// and [basic.link]p10:
// ...the types specified by all declarations referring to a
// given variable or function shall be identical...
// C does not have an equivalent rule.
//
// Ignore this if we've got an explicit attribute; the user
// probably knows what they're doing.
//
// Note that we don't want to make the variable non-external
// because of this, but unique-external linkage suits us.
if (Context.getLangOpts().CPlusPlus && !isFirstInExternCContext(Var)) {
LinkageInfo TypeLV = getLVForType(*Var->getType(), computation);
if (TypeLV.getLinkage() != ExternalLinkage)
return LinkageInfo::uniqueExternal();
if (!LV.isVisibilityExplicit())
LV.mergeVisibility(TypeLV);
}
if (Var->getStorageClass() == SC_PrivateExtern)
LV.mergeVisibility(HiddenVisibility, true);
// Note that Sema::MergeVarDecl already takes care of implementing
// C99 6.2.2p4 and propagating the visibility attribute, so we don't have
// to do it here.
// As per function and class template specializations (below),
// consider LV for the template and template arguments. We're at file
// scope, so we do not need to worry about nested specializations.
if (const VarTemplateSpecializationDecl *spec
= dyn_cast<VarTemplateSpecializationDecl>(Var)) {
mergeTemplateLV(LV, spec, computation);
}
// - a function, unless it has internal linkage; or
} else if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
// In theory, we can modify the function's LV by the LV of its
// type unless it has C linkage (see comment above about variables
// for justification). In practice, GCC doesn't do this, so it's
// just too painful to make work.
if (Function->getStorageClass() == SC_PrivateExtern)
LV.mergeVisibility(HiddenVisibility, true);
// Note that Sema::MergeCompatibleFunctionDecls already takes care of
// merging storage classes and visibility attributes, so we don't have to
// look at previous decls in here.
// In C++, then if the type of the function uses a type with
// unique-external linkage, it's not legally usable from outside
// this translation unit. However, we should use the C linkage
// rules instead for extern "C" declarations.
if (Context.getLangOpts().CPlusPlus &&
!Function->isInExternCContext()) {
// Only look at the type-as-written. If this function has an auto-deduced
// return type, we can't compute the linkage of that type because it could
// require looking at the linkage of this function, and we don't need this
// for correctness because the type is not part of the function's
// signature.
// FIXME: This is a hack. We should be able to solve this circularity and
// the one in getLVForClassMember for Functions some other way.
QualType TypeAsWritten = Function->getType();
if (TypeSourceInfo *TSI = Function->getTypeSourceInfo())
TypeAsWritten = TSI->getType();
if (TypeAsWritten->getLinkage() == UniqueExternalLinkage)
return LinkageInfo::uniqueExternal();
}
// Consider LV from the template and the template arguments.
// We're at file scope, so we do not need to worry about nested
// specializations.
if (FunctionTemplateSpecializationInfo *specInfo
= Function->getTemplateSpecializationInfo()) {
mergeTemplateLV(LV, Function, specInfo, computation);
}
// - a named class (Clause 9), or an unnamed class defined in a
// typedef declaration in which the class has the typedef name
// for linkage purposes (7.1.3); or
// - a named enumeration (7.2), or an unnamed enumeration
// defined in a typedef declaration in which the enumeration
// has the typedef name for linkage purposes (7.1.3); or
} else if (const TagDecl *Tag = dyn_cast<TagDecl>(D)) {
// Unnamed tags have no linkage.
if (!Tag->hasNameForLinkage())
return LinkageInfo::none();
// If this is a class template specialization, consider the
// linkage of the template and template arguments. We're at file
// scope, so we do not need to worry about nested specializations.
if (const ClassTemplateSpecializationDecl *spec
= dyn_cast<ClassTemplateSpecializationDecl>(Tag)) {
mergeTemplateLV(LV, spec, computation);
}
// - an enumerator belonging to an enumeration with external linkage;
} else if (isa<EnumConstantDecl>(D)) {
LinkageInfo EnumLV = getLVForDecl(cast<NamedDecl>(D->getDeclContext()),
computation);
if (!isExternalFormalLinkage(EnumLV.getLinkage()))
return LinkageInfo::none();
LV.merge(EnumLV);
// - a template, unless it is a function template that has
// internal linkage (Clause 14);
} else if (const TemplateDecl *temp = dyn_cast<TemplateDecl>(D)) {
bool considerVisibility = !hasExplicitVisibilityAlready(computation);
LinkageInfo tempLV =
getLVForTemplateParameterList(temp->getTemplateParameters(), computation);
LV.mergeMaybeWithVisibility(tempLV, considerVisibility);
// - a namespace (7.3), unless it is declared within an unnamed
// namespace.
} else if (isa<NamespaceDecl>(D) && !D->isInAnonymousNamespace()) {
return LV;
// By extension, we assign external linkage to Objective-C
// interfaces.
} else if (isa<ObjCInterfaceDecl>(D)) {
// fall out: keep the LinkageInfo computed above (external by default).
// Everything not covered here has no linkage.
} else {
// FIXME: A typedef declaration has linkage if it gives a type a name for
// linkage purposes.
return LinkageInfo::none();
}
// If we ended up with non-external linkage, visibility should
// always be default.
if (LV.getLinkage() != ExternalLinkage)
return LinkageInfo(LV.getLinkage(), DefaultVisibility, false);
return LV;
}
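// Illustration (assumed user code, C++ mode):
//   const int N = 3;         // internal linkage: const, not extern, no
//                            // prior declaration
//   extern const int M = 4;  // external linkage: explicitly extern
// Note the HLSL carve-out above: HLSL global 'const' variables skip this
// rule and keep external linkage.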
static LinkageInfo getLVForClassMember(const NamedDecl *D,
LVComputationKind computation) {
// Only certain class members have linkage. Note that fields don't
// really have linkage, but it's convenient to say they do for the
// purposes of calculating linkage of pointer-to-data-member
// template arguments.
//
// Templates also don't officially have linkage, but since we ignore
// the C++ standard and look at template arguments when determining
// linkage and visibility of a template specialization, we might hit
// a template template argument that way. If we do, we need to
// consider its linkage.
if (!(isa<CXXMethodDecl>(D) ||
isa<VarDecl>(D) ||
isa<FieldDecl>(D) ||
isa<IndirectFieldDecl>(D) ||
isa<TagDecl>(D) ||
isa<TemplateDecl>(D)))
return LinkageInfo::none();
LinkageInfo LV;
// If we have an explicit visibility attribute, merge that in.
if (!hasExplicitVisibilityAlready(computation)) {
if (Optional<Visibility> Vis = getExplicitVisibility(D, computation))
LV.mergeVisibility(*Vis, true);
// If we're paying attention to global visibility, apply
// -finline-visibility-hidden if this is an inline method.
//
// Note that we do this before merging information about
// the class visibility.
if (!LV.isVisibilityExplicit() && useInlineVisibilityHidden(D))
LV.mergeVisibility(HiddenVisibility, true);
}
// If this class member has an explicit visibility attribute, the only
// thing that can change its visibility is the template arguments, so
// only look for them when processing the class.
LVComputationKind classComputation = computation;
if (LV.isVisibilityExplicit())
classComputation = withExplicitVisibilityAlready(computation);
LinkageInfo classLV =
getLVForDecl(cast<RecordDecl>(D->getDeclContext()), classComputation);
// If the class already has unique-external linkage, we can't improve.
if (classLV.getLinkage() == UniqueExternalLinkage)
return LinkageInfo::uniqueExternal();
if (!isExternallyVisible(classLV.getLinkage()))
return LinkageInfo::none();
// Otherwise, don't merge in classLV yet, because in certain cases
// we need to completely ignore the visibility from it.
// Specifically, if this decl exists and has an explicit attribute.
const NamedDecl *explicitSpecSuppressor = nullptr;
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
// If the type of the function uses a type with unique-external
// linkage, it's not legally usable from outside this translation unit.
// But only look at the type-as-written. If this function has an
// auto-deduced return type, we can't compute the linkage of that type
// because it could require looking at the linkage of this function, and we
// don't need this for correctness because the type is not part of the
// function's signature.
// FIXME: This is a hack. We should be able to solve this circularity and
// the one in getLVForNamespaceScopeDecl for Functions some other way.
{
QualType TypeAsWritten = MD->getType();
if (TypeSourceInfo *TSI = MD->getTypeSourceInfo())
TypeAsWritten = TSI->getType();
if (TypeAsWritten->getLinkage() == UniqueExternalLinkage)
return LinkageInfo::uniqueExternal();
}
// If this is a method template specialization, use the linkage for
// the template parameters and arguments.
if (FunctionTemplateSpecializationInfo *spec
= MD->getTemplateSpecializationInfo()) {
mergeTemplateLV(LV, MD, spec, computation);
if (spec->isExplicitSpecialization()) {
explicitSpecSuppressor = MD;
} else if (isExplicitMemberSpecialization(spec->getTemplate())) {
explicitSpecSuppressor = spec->getTemplate()->getTemplatedDecl();
}
} else if (isExplicitMemberSpecialization(MD)) {
explicitSpecSuppressor = MD;
}
} else if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
if (const ClassTemplateSpecializationDecl *spec
= dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
mergeTemplateLV(LV, spec, computation);
if (spec->isExplicitSpecialization()) {
explicitSpecSuppressor = spec;
} else {
const ClassTemplateDecl *temp = spec->getSpecializedTemplate();
if (isExplicitMemberSpecialization(temp)) {
explicitSpecSuppressor = temp->getTemplatedDecl();
}
}
} else if (isExplicitMemberSpecialization(RD)) {
explicitSpecSuppressor = RD;
}
// Static data members.
} else if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
// HLSL Change: Give static data members internal linkage, to avoid confusing them with variables in the global constant buffer.
if (VD->getStorageClass() == StorageClass::SC_Static)
LV.setLinkage(Linkage::InternalLinkage);
if (const VarTemplateSpecializationDecl *spec
= dyn_cast<VarTemplateSpecializationDecl>(VD))
mergeTemplateLV(LV, spec, computation);
// Modify the variable's linkage by its type, but ignore the
// type's visibility unless it's a definition.
LinkageInfo typeLV = getLVForType(*VD->getType(), computation);
if (!LV.isVisibilityExplicit() && !classLV.isVisibilityExplicit())
LV.mergeVisibility(typeLV);
LV.mergeExternalVisibility(typeLV);
if (isExplicitMemberSpecialization(VD)) {
explicitSpecSuppressor = VD;
}
// Template members.
} else if (const TemplateDecl *temp = dyn_cast<TemplateDecl>(D)) {
bool considerVisibility =
(!LV.isVisibilityExplicit() &&
!classLV.isVisibilityExplicit() &&
!hasExplicitVisibilityAlready(computation));
LinkageInfo tempLV =
getLVForTemplateParameterList(temp->getTemplateParameters(), computation);
LV.mergeMaybeWithVisibility(tempLV, considerVisibility);
if (const RedeclarableTemplateDecl *redeclTemp =
dyn_cast<RedeclarableTemplateDecl>(temp)) {
if (isExplicitMemberSpecialization(redeclTemp)) {
explicitSpecSuppressor = temp->getTemplatedDecl();
}
}
}
// We should never be looking for an attribute directly on a template.
assert(!explicitSpecSuppressor || !isa<TemplateDecl>(explicitSpecSuppressor));
// If this member is an explicit member specialization, and it has
// an explicit attribute, ignore visibility from the parent.
bool considerClassVisibility = true;
if (explicitSpecSuppressor &&
// optimization: hasDVA() is true only with explicit visibility.
LV.isVisibilityExplicit() &&
classLV.getVisibility() != DefaultVisibility &&
hasDirectVisibilityAttribute(explicitSpecSuppressor, computation)) {
considerClassVisibility = false;
}
// Finally, merge in information from the class.
LV.mergeMaybeWithVisibility(classLV, considerClassVisibility);
return LV;
}
void NamedDecl::anchor() { }
static LinkageInfo computeLVForDecl(const NamedDecl *D,
LVComputationKind computation);
bool NamedDecl::isLinkageValid() const {
if (!hasCachedLinkage())
return true;
return computeLVForDecl(this, LVForLinkageOnly).getLinkage() ==
getCachedLinkage();
}
ObjCStringFormatFamily NamedDecl::getObjCFStringFormattingFamily() const {
StringRef name = getName();
if (name.empty()) return SFF_None;
if (name.front() == 'C')
if (name == "CFStringCreateWithFormat" ||
name == "CFStringCreateWithFormatAndArguments" ||
name == "CFStringAppendFormat" ||
name == "CFStringAppendFormatAndArguments")
return SFF_CFString;
return SFF_None;
}
Linkage NamedDecl::getLinkageInternal() const {
// We don't care about visibility here, so ask for the cheapest
// possible visibility analysis.
return getLVForDecl(this, LVForLinkageOnly).getLinkage();
}
LinkageInfo NamedDecl::getLinkageAndVisibility() const {
LVComputationKind computation =
(usesTypeVisibility(this) ? LVForType : LVForValue);
return getLVForDecl(this, computation);
}
static Optional<Visibility>
getExplicitVisibilityAux(const NamedDecl *ND,
NamedDecl::ExplicitVisibilityKind kind,
bool IsMostRecent) {
assert(!IsMostRecent || ND == ND->getMostRecentDecl());
// Check the declaration itself first.
if (Optional<Visibility> V = getVisibilityOf(ND, kind))
return V;
// If this is a member class of a specialization of a class template
// and the corresponding decl has explicit visibility, use that.
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(ND)) {
CXXRecordDecl *InstantiatedFrom = RD->getInstantiatedFromMemberClass();
if (InstantiatedFrom)
return getVisibilityOf(InstantiatedFrom, kind);
}
// If there wasn't explicit visibility there, and this is a
// specialization of a class template, check for visibility
// on the pattern.
if (const ClassTemplateSpecializationDecl *spec
= dyn_cast<ClassTemplateSpecializationDecl>(ND))
return getVisibilityOf(spec->getSpecializedTemplate()->getTemplatedDecl(),
kind);
// Use the most recent declaration.
if (!IsMostRecent && !isa<NamespaceDecl>(ND)) {
const NamedDecl *MostRecent = ND->getMostRecentDecl();
if (MostRecent != ND)
return getExplicitVisibilityAux(MostRecent, kind, true);
}
if (const VarDecl *Var = dyn_cast<VarDecl>(ND)) {
if (Var->isStaticDataMember()) {
VarDecl *InstantiatedFrom = Var->getInstantiatedFromStaticDataMember();
if (InstantiatedFrom)
return getVisibilityOf(InstantiatedFrom, kind);
}
if (const auto *VTSD = dyn_cast<VarTemplateSpecializationDecl>(Var))
return getVisibilityOf(VTSD->getSpecializedTemplate()->getTemplatedDecl(),
kind);
return None;
}
// Also handle function template specializations.
if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND)) {
// If the function is a specialization of a template with an
// explicit visibility attribute, use that.
if (FunctionTemplateSpecializationInfo *templateInfo
= fn->getTemplateSpecializationInfo())
return getVisibilityOf(templateInfo->getTemplate()->getTemplatedDecl(),
kind);
// If the function is a member of a specialization of a class template
// and the corresponding decl has explicit visibility, use that.
FunctionDecl *InstantiatedFrom = fn->getInstantiatedFromMemberFunction();
if (InstantiatedFrom)
return getVisibilityOf(InstantiatedFrom, kind);
return None;
}
// The visibility of a template is stored in the templated decl.
if (const TemplateDecl *TD = dyn_cast<TemplateDecl>(ND))
return getVisibilityOf(TD->getTemplatedDecl(), kind);
return None;
}
Optional<Visibility>
NamedDecl::getExplicitVisibility(ExplicitVisibilityKind kind) const {
return getExplicitVisibilityAux(this, kind, false);
}
static LinkageInfo getLVForClosure(const DeclContext *DC, Decl *ContextDecl,
LVComputationKind computation) {
// This lambda has its linkage/visibility determined by its owner.
if (ContextDecl) {
if (isa<ParmVarDecl>(ContextDecl))
DC = ContextDecl->getDeclContext()->getRedeclContext();
else
return getLVForDecl(cast<NamedDecl>(ContextDecl), computation);
}
if (const NamedDecl *ND = dyn_cast<NamedDecl>(DC))
return getLVForDecl(ND, computation);
return LinkageInfo::external();
}
static LinkageInfo getLVForLocalDecl(const NamedDecl *D,
LVComputationKind computation) {
if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
if (Function->isInAnonymousNamespace() &&
!Function->isInExternCContext())
return LinkageInfo::uniqueExternal();
// This is a "void f();" which got merged with a file static.
if (Function->getCanonicalDecl()->getStorageClass() == SC_Static)
return LinkageInfo::internal();
LinkageInfo LV;
if (!hasExplicitVisibilityAlready(computation)) {
if (Optional<Visibility> Vis =
getExplicitVisibility(Function, computation))
LV.mergeVisibility(*Vis, true);
}
// Note that Sema::MergeCompatibleFunctionDecls already takes care of
// merging storage classes and visibility attributes, so we don't have to
// look at previous decls in here.
return LV;
}
if (const VarDecl *Var = dyn_cast<VarDecl>(D)) {
if (Var->hasExternalStorage()) {
if (Var->isInAnonymousNamespace() && !Var->isInExternCContext())
return LinkageInfo::uniqueExternal();
LinkageInfo LV;
if (Var->getStorageClass() == SC_PrivateExtern)
LV.mergeVisibility(HiddenVisibility, true);
else if (!hasExplicitVisibilityAlready(computation)) {
if (Optional<Visibility> Vis = getExplicitVisibility(Var, computation))
LV.mergeVisibility(*Vis, true);
}
if (const VarDecl *Prev = Var->getPreviousDecl()) {
LinkageInfo PrevLV = getLVForDecl(Prev, computation);
if (PrevLV.getLinkage())
LV.setLinkage(PrevLV.getLinkage());
LV.mergeVisibility(PrevLV);
}
return LV;
}
if (!Var->isStaticLocal())
return LinkageInfo::none();
}
ASTContext &Context = D->getASTContext();
if (!Context.getLangOpts().CPlusPlus)
return LinkageInfo::none();
const Decl *OuterD = getOutermostFuncOrBlockContext(D);
if (!OuterD)
return LinkageInfo::none();
LinkageInfo LV;
if (const BlockDecl *BD = dyn_cast<BlockDecl>(OuterD)) {
if (!BD->getBlockManglingNumber())
return LinkageInfo::none();
LV = getLVForClosure(BD->getDeclContext()->getRedeclContext(),
BD->getBlockManglingContextDecl(), computation);
} else {
const FunctionDecl *FD = cast<FunctionDecl>(OuterD);
if (!FD->isInlined() &&
!isTemplateInstantiation(FD->getTemplateSpecializationKind()))
return LinkageInfo::none();
LV = getLVForDecl(FD, computation);
}
if (!isExternallyVisible(LV.getLinkage()))
return LinkageInfo::none();
return LinkageInfo(VisibleNoLinkage, LV.getVisibility(),
LV.isVisibilityExplicit());
}
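// Illustration (assumed user code, C++ mode):
//   inline void f() { static int n; }
// 'n' has no formal linkage, but every TU that emits f() must share one
// copy of it, so the computation above yields VisibleNoLinkage with f()'s
// visibility.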
static inline const CXXRecordDecl*
getOutermostEnclosingLambda(const CXXRecordDecl *Record) {
const CXXRecordDecl *Ret = Record;
while (Record && Record->isLambda()) {
Ret = Record;
if (!Record->getParent()) break;
// Get the Containing Class of this Lambda Class
Record = dyn_cast_or_null<CXXRecordDecl>(
Record->getParent()->getParent());
}
return Ret;
}
static LinkageInfo computeLVForDecl(const NamedDecl *D,
LVComputationKind computation) {
// Objective-C: treat all Objective-C declarations as having external
// linkage.
switch (D->getKind()) {
default:
break;
case Decl::ParmVar:
return LinkageInfo::none();
case Decl::TemplateTemplateParm: // count these as external
case Decl::NonTypeTemplateParm:
case Decl::ObjCAtDefsField:
case Decl::ObjCCategory:
case Decl::ObjCCategoryImpl:
case Decl::ObjCCompatibleAlias:
case Decl::ObjCImplementation:
case Decl::ObjCMethod:
case Decl::ObjCProperty:
case Decl::ObjCPropertyImpl:
case Decl::ObjCProtocol:
return LinkageInfo::external();
case Decl::CXXRecord: {
const CXXRecordDecl *Record = cast<CXXRecordDecl>(D);
if (Record->isLambda()) {
if (!Record->getLambdaManglingNumber()) {
// This lambda has no mangling number, so it's internal.
return LinkageInfo::internal();
}
// This lambda has its linkage/visibility determined:
// - either by the outermost lambda, if that lambda has no mangling
// number,
// - or by the parent of the outermost lambda.
// This prevents infinite recursion in settings such as nested lambdas
// used in NSDMIs, e.g.
// struct L {
// int t{};
// int t2 = ([](int a) { return [](int b) { return b; };})(t)(t);
// };
const CXXRecordDecl *OuterMostLambda =
getOutermostEnclosingLambda(Record);
if (!OuterMostLambda->getLambdaManglingNumber())
return LinkageInfo::internal();
return getLVForClosure(
OuterMostLambda->getDeclContext()->getRedeclContext(),
OuterMostLambda->getLambdaContextDecl(), computation);
}
break;
}
}
// Handle linkage for namespace-scope names.
if (D->getDeclContext()->getRedeclContext()->isFileContext())
return getLVForNamespaceScopeDecl(D, computation);
// C++ [basic.link]p5:
// In addition, a member function, static data member, a named
// class or enumeration of class scope, or an unnamed class or
// enumeration defined in a class-scope typedef declaration such
// that the class or enumeration has the typedef name for linkage
// purposes (7.1.3), has external linkage if the name of the class
// has external linkage.
if (D->getDeclContext()->isRecord())
return getLVForClassMember(D, computation);
// C++ [basic.link]p6:
// The name of a function declared in block scope and the name of
// an object declared by a block scope extern declaration have
// linkage. If there is a visible declaration of an entity with
// linkage having the same name and type, ignoring entities
// declared outside the innermost enclosing namespace scope, the
// block scope declaration declares that same entity and receives
// the linkage of the previous declaration. If there is more than
// one such matching entity, the program is ill-formed. Otherwise,
// if no matching entity is found, the block scope entity receives
// external linkage.
if (D->getDeclContext()->isFunctionOrMethod())
return getLVForLocalDecl(D, computation);
// C++ [basic.link]p6:
// Names not covered by these rules have no linkage.
return LinkageInfo::none();
}
namespace clang {
class LinkageComputer {
public:
static LinkageInfo getLVForDecl(const NamedDecl *D,
LVComputationKind computation) {
if (computation == LVForLinkageOnly && D->hasCachedLinkage())
return LinkageInfo(D->getCachedLinkage(), DefaultVisibility, false);
LinkageInfo LV = computeLVForDecl(D, computation);
if (D->hasCachedLinkage())
assert(D->getCachedLinkage() == LV.getLinkage());
D->setCachedLinkage(LV.getLinkage());
#ifndef NDEBUG
// In C (because of GNU inline) and in C++ with Microsoft extensions, a
// static can follow an extern, so we can have two decls with different
// linkages.
const LangOptions &Opts = D->getASTContext().getLangOpts();
(void)(Opts); // HLSL Change - suppress unused-variable warnings; only static consts of Opts are referenced below.
if (!Opts.CPlusPlus || Opts.MicrosoftExt)
return LV;
// We have just computed the linkage for this decl. By induction we know
// that all other computed linkages match, check that the one we just
// computed also does.
NamedDecl *Old = nullptr;
for (auto I : D->redecls()) {
NamedDecl *T = cast<NamedDecl>(I);
if (T == D)
continue;
if (!T->isInvalidDecl() && T->hasCachedLinkage()) {
Old = T;
break;
}
}
assert(!Old || Old->getCachedLinkage() == D->getCachedLinkage());
#endif
return LV;
}
};
}
static LinkageInfo getLVForDecl(const NamedDecl *D,
LVComputationKind computation) {
return clang::LinkageComputer::getLVForDecl(D, computation);
}
std::string NamedDecl::getQualifiedNameAsString() const {
std::string QualName;
llvm::raw_string_ostream OS(QualName);
printQualifiedName(OS, getASTContext().getPrintingPolicy());
return OS.str();
}
void NamedDecl::printQualifiedName(raw_ostream &OS) const {
printQualifiedName(OS, getASTContext().getPrintingPolicy());
}
void NamedDecl::printQualifiedName(raw_ostream &OS,
const PrintingPolicy &P) const {
const DeclContext *Ctx = getDeclContext();
if (Ctx->isFunctionOrMethod()) {
printName(OS);
return;
}
typedef SmallVector<const DeclContext *, 8> ContextsTy;
ContextsTy Contexts;
// Collect contexts.
while (Ctx && isa<NamedDecl>(Ctx)) {
Contexts.push_back(Ctx);
Ctx = Ctx->getParent();
}
for (ContextsTy::reverse_iterator I = Contexts.rbegin(), E = Contexts.rend();
I != E; ++I) {
if (const ClassTemplateSpecializationDecl *Spec
= dyn_cast<ClassTemplateSpecializationDecl>(*I)) {
OS << Spec->getName();
const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
TemplateSpecializationType::PrintTemplateArgumentList(OS,
TemplateArgs.data(),
TemplateArgs.size(),
P);
} else if (const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(*I)) {
if (P.SuppressUnwrittenScope &&
(ND->isAnonymousNamespace() || ND->isInline()))
continue;
if (ND->isAnonymousNamespace())
OS << "(anonymous namespace)";
else
OS << *ND;
// HLSL Change Begin - do not add the cbuffer name to the qualified name.
} else if (isa<HLSLBufferDecl>(*I)) {
continue;
// HLSL Change End.
} else if (const RecordDecl *RD = dyn_cast<RecordDecl>(*I)) {
if (!RD->getIdentifier())
OS << "(anonymous " << RD->getKindName() << ')';
else
OS << *RD;
} else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
const FunctionProtoType *FT = nullptr;
if (FD->hasWrittenPrototype())
FT = dyn_cast<FunctionProtoType>(FD->getType()->castAs<FunctionType>());
OS << *FD << '(';
if (FT) {
unsigned NumParams = FD->getNumParams();
for (unsigned i = 0; i < NumParams; ++i) {
if (i)
OS << ", ";
OS << FD->getParamDecl(i)->getType().stream(P);
}
if (FT->isVariadic()) {
if (NumParams > 0)
OS << ", ";
OS << "...";
}
}
OS << ')';
} else {
OS << *cast<NamedDecl>(*I);
}
OS << "::";
}
if (getDeclName())
OS << *this;
else
OS << "(anonymous)";
}
void NamedDecl::getNameForDiagnostic(raw_ostream &OS,
const PrintingPolicy &Policy,
bool Qualified) const {
if (Qualified)
printQualifiedName(OS, Policy);
else
printName(OS);
}
static bool isKindReplaceableBy(Decl::Kind OldK, Decl::Kind NewK) {
// For method declarations, we never replace.
if (ObjCMethodDecl::classofKind(NewK))
return false;
if (OldK == NewK)
return true;
// A compatibility alias for a class can be replaced by an interface.
if (ObjCCompatibleAliasDecl::classofKind(OldK) &&
ObjCInterfaceDecl::classofKind(NewK))
return true;
// A typedef-declaration, alias-declaration, or Objective-C class declaration
// can replace another declaration of the same type. Semantic analysis checks
// that we have matching types.
if ((TypedefNameDecl::classofKind(OldK) ||
ObjCInterfaceDecl::classofKind(OldK)) &&
(TypedefNameDecl::classofKind(NewK) ||
ObjCInterfaceDecl::classofKind(NewK)))
return true;
// Otherwise, a kind mismatch implies that the declaration is not replaced.
return false;
}
template<typename T> static bool isRedeclarableImpl(Redeclarable<T> *) {
return true;
}
static bool isRedeclarableImpl(...) { return false; }
static bool isRedeclarable(Decl::Kind K) {
switch (K) {
#define DECL(Type, Base) \
case Decl::Type: \
return isRedeclarableImpl((Type##Decl *)nullptr);
#define ABSTRACT_DECL(DECL)
#include "clang/AST/DeclNodes.inc"
}
llvm_unreachable("unknown decl kind");
}
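// The two isRedeclarableImpl overloads above implement a compile-time
// trait by overload resolution. A minimal sketch of the same idiom,
// assuming nothing from this file:
//   template <class T> struct Redecl {};
//   struct D : Redecl<D> {};
//   struct B {};
//   template <class T> bool impl(Redecl<T> *) { return true; }
//   bool impl(...) { return false; }
//   // impl((D *)nullptr) -> true, impl((B *)nullptr) -> false
// The '...' overload is the worst possible match, so it is chosen only
// when the conversion to Redeclarable<T>* is unavailable.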
bool NamedDecl::declarationReplaces(NamedDecl *OldD, bool IsKnownNewer) const {
assert(getDeclName() == OldD->getDeclName() && "Declaration name mismatch");
// Never replace one imported declaration with another; we need both results
// when re-exporting.
if (OldD->isFromASTFile() && isFromASTFile())
return false;
if (!isKindReplaceableBy(OldD->getKind(), getKind()))
return false;
// Inline namespaces can give us two declarations with the same
// name and kind in the same scope but different contexts; we should
// keep both declarations in this case.
if (!this->getDeclContext()->getRedeclContext()->Equals(
OldD->getDeclContext()->getRedeclContext()))
return false;
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(this))
// For function declarations, we keep track of redeclarations.
// FIXME: This returns false for functions that should in fact be replaced.
// Instead, perform some kind of type check?
if (FD->getPreviousDecl() != OldD)
return false;
// For function templates, the underlying function declarations are linked.
if (const FunctionTemplateDecl *FunctionTemplate =
dyn_cast<FunctionTemplateDecl>(this))
return FunctionTemplate->getTemplatedDecl()->declarationReplaces(
cast<FunctionTemplateDecl>(OldD)->getTemplatedDecl());
// Using shadow declarations can be overloaded on their target declarations
// if they introduce functions.
// FIXME: If our target replaces the old target, can we replace the old
// shadow declaration?
if (auto *USD = dyn_cast<UsingShadowDecl>(this))
if (USD->getTargetDecl() != cast<UsingShadowDecl>(OldD)->getTargetDecl())
return false;
// Using declarations can be overloaded if they introduce functions.
if (auto *UD = dyn_cast<UsingDecl>(this)) {
ASTContext &Context = getASTContext();
return Context.getCanonicalNestedNameSpecifier(UD->getQualifier()) ==
Context.getCanonicalNestedNameSpecifier(
cast<UsingDecl>(OldD)->getQualifier());
}
if (auto *UUVD = dyn_cast<UnresolvedUsingValueDecl>(this)) {
ASTContext &Context = getASTContext();
return Context.getCanonicalNestedNameSpecifier(UUVD->getQualifier()) ==
Context.getCanonicalNestedNameSpecifier(
cast<UnresolvedUsingValueDecl>(OldD)->getQualifier());
}
// UsingDirectiveDecls are not really NamedDecls, and they all have the
// same name. We want to keep the new one, unless it nominates the same
// namespace.
if (auto *UD = dyn_cast<UsingDirectiveDecl>(this))
return UD->getNominatedNamespace()->getOriginalNamespace() ==
cast<UsingDirectiveDecl>(OldD)->getNominatedNamespace()
->getOriginalNamespace();
if (!IsKnownNewer && isRedeclarable(getKind())) {
// Check whether this is actually newer than OldD. We want to keep the
// newer declaration. This loop will usually only iterate once, because
// OldD is usually the previous declaration.
for (auto D : redecls()) {
if (D == OldD)
break;
// If we reach the canonical declaration, then OldD is not actually older
// than this one.
//
// FIXME: In this case, we should not add this decl to the lookup table.
if (D->isCanonicalDecl())
return false;
}
}
// It's a newer declaration of the same kind of declaration in the same scope,
// and not an overload: we want this decl instead of the existing one.
return true;
}
bool NamedDecl::hasLinkage() const {
return getFormalLinkage() != NoLinkage;
}
NamedDecl *NamedDecl::getUnderlyingDeclImpl() {
NamedDecl *ND = this;
while (UsingShadowDecl *UD = dyn_cast<UsingShadowDecl>(ND))
ND = UD->getTargetDecl();
if (ObjCCompatibleAliasDecl *AD = dyn_cast<ObjCCompatibleAliasDecl>(ND))
return AD->getClassInterface();
return ND;
}
bool NamedDecl::isCXXInstanceMember() const {
if (!isCXXClassMember())
return false;
const NamedDecl *D = this;
if (isa<UsingShadowDecl>(D))
D = cast<UsingShadowDecl>(D)->getTargetDecl();
if (isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D) || isa<MSPropertyDecl>(D))
return true;
if (const CXXMethodDecl *MD =
dyn_cast_or_null<CXXMethodDecl>(D->getAsFunction()))
return MD->isInstance();
return false;
}
//===----------------------------------------------------------------------===//
// DeclaratorDecl Implementation
//===----------------------------------------------------------------------===//
template <typename DeclT>
static SourceLocation getTemplateOrInnerLocStart(const DeclT *decl) {
if (decl->getNumTemplateParameterLists() > 0)
return decl->getTemplateParameterList(0)->getTemplateLoc();
else
return decl->getInnerLocStart();
}
SourceLocation DeclaratorDecl::getTypeSpecStartLoc() const {
TypeSourceInfo *TSI = getTypeSourceInfo();
if (TSI) return TSI->getTypeLoc().getBeginLoc();
return SourceLocation();
}
void DeclaratorDecl::setQualifierInfo(NestedNameSpecifierLoc QualifierLoc) {
if (QualifierLoc) {
// Make sure the extended decl info is allocated.
if (!hasExtInfo()) {
// Save (non-extended) type source info pointer.
TypeSourceInfo *savedTInfo = DeclInfo.get<TypeSourceInfo*>();
// Allocate external info struct.
DeclInfo = new (getASTContext()) ExtInfo;
// Restore savedTInfo into (extended) decl info.
getExtInfo()->TInfo = savedTInfo;
}
// Set qualifier info.
getExtInfo()->QualifierLoc = QualifierLoc;
} else {
// Here Qualifier == 0, i.e., we are removing the qualifier (if any).
if (hasExtInfo()) {
if (getExtInfo()->NumTemplParamLists == 0) {
// Save type source info pointer.
TypeSourceInfo *savedTInfo = getExtInfo()->TInfo;
// Deallocate the extended decl info.
getASTContext().Deallocate(getExtInfo());
// Restore savedTInfo into (non-extended) decl info.
DeclInfo = savedTInfo;
}
else
getExtInfo()->QualifierLoc = QualifierLoc;
}
}
}
void
DeclaratorDecl::setTemplateParameterListsInfo(ASTContext &Context,
unsigned NumTPLists,
TemplateParameterList **TPLists) {
assert(NumTPLists > 0);
// Make sure the extended decl info is allocated.
if (!hasExtInfo()) {
// Save (non-extended) type source info pointer.
TypeSourceInfo *savedTInfo = DeclInfo.get<TypeSourceInfo*>();
// Allocate external info struct.
DeclInfo = new (getASTContext()) ExtInfo;
// Restore savedTInfo into (extended) decl info.
getExtInfo()->TInfo = savedTInfo;
}
// Set the template parameter lists info.
getExtInfo()->setTemplateParameterListsInfo(Context, NumTPLists, TPLists);
}
SourceLocation DeclaratorDecl::getOuterLocStart() const {
return getTemplateOrInnerLocStart(this);
}
namespace {
// Helper function: returns true if QT is or contains a type
// having a postfix component.
bool typeIsPostfix(clang::QualType QT) {
while (true) {
const Type* T = QT.getTypePtr();
switch (T->getTypeClass()) {
default:
return false;
case Type::Pointer:
QT = cast<PointerType>(T)->getPointeeType();
break;
case Type::BlockPointer:
QT = cast<BlockPointerType>(T)->getPointeeType();
break;
case Type::MemberPointer:
QT = cast<MemberPointerType>(T)->getPointeeType();
break;
case Type::LValueReference:
case Type::RValueReference:
QT = cast<ReferenceType>(T)->getPointeeType();
break;
case Type::PackExpansion:
QT = cast<PackExpansionType>(T)->getPattern();
break;
case Type::Paren:
case Type::ConstantArray:
case Type::DependentSizedArray:
case Type::IncompleteArray:
case Type::VariableArray:
case Type::FunctionProto:
case Type::FunctionNoProto:
return true;
}
}
}
} // namespace
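// Illustration (assumed user code): typeIsPostfix answers "does declarator
// syntax trail the name?":
//   int a[4];      // ConstantArray: "[4]" follows 'a' -> true
//   int (*pf)();   // Pointer to FunctionProto: "()" follows -> true
//   int *p;        // plain Pointer: nothing follows 'p' -> false
// getSourceRange below uses this to decide whether the type's end location
// extends past the declarator name.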
SourceRange DeclaratorDecl::getSourceRange() const {
SourceLocation RangeEnd = getLocation();
if (TypeSourceInfo *TInfo = getTypeSourceInfo()) {
// If the declaration has no name, or the type extends past the name, take the
// end location of the type.
if (!getDeclName() || typeIsPostfix(TInfo->getType()))
RangeEnd = TInfo->getTypeLoc().getSourceRange().getEnd();
}
return SourceRange(getOuterLocStart(), RangeEnd);
}
void
QualifierInfo::setTemplateParameterListsInfo(ASTContext &Context,
unsigned NumTPLists,
TemplateParameterList **TPLists) {
assert((NumTPLists == 0 || TPLists != nullptr) &&
"Empty array of template parameters with positive size!");
// Free previous template parameters (if any).
if (NumTemplParamLists > 0) {
Context.Deallocate(TemplParamLists);
TemplParamLists = nullptr;
NumTemplParamLists = 0;
}
// Set info on matched template parameter lists (if any).
if (NumTPLists > 0) {
TemplParamLists = new (Context) TemplateParameterList*[NumTPLists];
NumTemplParamLists = NumTPLists;
std::copy(TPLists, TPLists + NumTPLists, TemplParamLists);
}
}
//===----------------------------------------------------------------------===//
// VarDecl Implementation
//===----------------------------------------------------------------------===//
const char *VarDecl::getStorageClassSpecifierString(StorageClass SC) {
switch (SC) {
case SC_None: break;
case SC_Auto: return "auto";
case SC_Extern: return "extern";
case SC_OpenCLWorkGroupLocal: return "<<work-group-local>>";
case SC_PrivateExtern: return "__private_extern__";
case SC_Register: return "register";
case SC_Static: return "static";
}
llvm_unreachable("Invalid storage class");
}
VarDecl::VarDecl(Kind DK, ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo,
StorageClass SC)
: DeclaratorDecl(DK, DC, IdLoc, Id, T, TInfo, StartLoc),
redeclarable_base(C), Init() {
static_assert(sizeof(VarDeclBitfields) <= sizeof(unsigned),
"VarDeclBitfields too large!");
static_assert(sizeof(ParmVarDeclBitfields) <= sizeof(unsigned),
"ParmVarDeclBitfields too large!");
static_assert(sizeof(NonParmVarDeclBitfields) <= sizeof(unsigned),
"NonParmVarDeclBitfields too large!");
AllBits = 0;
VarDeclBits.SClass = SC;
// Everything else is implicitly initialized to false.
}
VarDecl *VarDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation StartL, SourceLocation IdL,
IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo,
StorageClass S) {
return new (C, DC) VarDecl(Var, C, DC, StartL, IdL, Id, T, TInfo, S);
}
VarDecl *VarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID)
VarDecl(Var, C, nullptr, SourceLocation(), SourceLocation(), nullptr,
QualType(), nullptr, SC_None);
}
void VarDecl::setStorageClass(StorageClass SC) {
assert(isLegalForVariable(SC));
VarDeclBits.SClass = SC;
}
VarDecl::TLSKind VarDecl::getTLSKind() const {
switch (VarDeclBits.TSCSpec) {
case TSCS_unspecified:
if (!hasAttr<ThreadAttr>() &&
!(getASTContext().getLangOpts().OpenMPUseTLS &&
getASTContext().getTargetInfo().isTLSSupported() &&
hasAttr<OMPThreadPrivateDeclAttr>()))
return TLS_None;
return ((getASTContext().getLangOpts().isCompatibleWithMSVC(
LangOptions::MSVC2015)) ||
hasAttr<OMPThreadPrivateDeclAttr>())
? TLS_Dynamic
: TLS_Static;
case TSCS___thread: // Fall through.
case TSCS__Thread_local:
return TLS_Static;
case TSCS_thread_local:
return TLS_Dynamic;
}
llvm_unreachable("Unknown thread storage class specifier!");
}
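// Illustration (assumed user code):
//   thread_local int a;  // TSCS_thread_local -> TLS_Dynamic (may require
//                        // dynamic initialization via a wrapper)
//   __thread int b;      // TSCS___thread -> TLS_Static
//   int c;               // TSCS_unspecified, no attributes -> TLS_None
// The MSVC2015-compat and OpenMP paths above can also promote
// attribute-driven TLS to TLS_Dynamic.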
SourceRange VarDecl::getSourceRange() const {
if (const Expr *Init = getInit()) {
SourceLocation InitEnd = Init->getLocEnd();
// If Init is implicit, ignore its source range and fall back on
// DeclaratorDecl::getSourceRange() to handle postfix elements.
if (InitEnd.isValid() && InitEnd != getLocation())
return SourceRange(getOuterLocStart(), InitEnd);
}
return DeclaratorDecl::getSourceRange();
}
template<typename T>
static LanguageLinkage getDeclLanguageLinkage(const T &D) {
// C++ [dcl.link]p1: All function types, function names with external linkage,
// and variable names with external linkage have a language linkage.
if (!D.hasExternalFormalLinkage())
return NoLanguageLinkage;
// Language linkage is a C++ concept, but saying that everything else in C has
// C language linkage fits the implementation nicely.
ASTContext &Context = D.getASTContext();
if (!Context.getLangOpts().CPlusPlus)
return CLanguageLinkage;
// C++ [dcl.link]p4: A C language linkage is ignored in determining the
// language linkage of the names of class members and the function type of
// class member functions.
const DeclContext *DC = D.getDeclContext();
if (DC->isRecord())
return CXXLanguageLinkage;
// If the first decl is in an extern "C" context, any other redeclaration
// will have C language linkage. If the first one is not in an extern "C"
// context, we would have reported an error for any other decl being in one.
if (isFirstInExternCContext(&D))
return CLanguageLinkage;
return CXXLanguageLinkage;
}
template<typename T>
static bool isDeclExternC(const T &D) {
// Since the context is ignored for class members, they can only have C++
// language linkage or no language linkage.
const DeclContext *DC = D.getDeclContext();
if (DC->isRecord()) {
assert(D.getASTContext().getLangOpts().CPlusPlus);
return false;
}
return D.getLanguageLinkage() == CLanguageLinkage;
}
LanguageLinkage VarDecl::getLanguageLinkage() const {
return getDeclLanguageLinkage(*this);
}
bool VarDecl::isExternC() const {
return isDeclExternC(*this);
}
bool VarDecl::isInExternCContext() const {
return getLexicalDeclContext()->isExternCContext();
}
bool VarDecl::isInExternCXXContext() const {
return getLexicalDeclContext()->isExternCXXContext();
}
VarDecl *VarDecl::getCanonicalDecl() { return getFirstDecl(); }
VarDecl::DefinitionKind
VarDecl::isThisDeclarationADefinition(ASTContext &C) const {
// C++ [basic.def]p2:
// A declaration is a definition unless [...] it contains the 'extern'
// specifier or a linkage-specification and neither an initializer [...],
// it declares a static data member in a class declaration [...].
// C++1y [temp.expl.spec]p15:
// An explicit specialization of a static data member or an explicit
// specialization of a static data member template is a definition if the
// declaration includes an initializer; otherwise, it is a declaration.
//
// FIXME: How do you declare (but not define) a partial specialization of
// a static data member template outside the containing class?
if (isStaticDataMember()) {
if (isOutOfLine() &&
(hasInit() ||
// If the first declaration is out-of-line, this may be an
// instantiation of an out-of-line partial specialization of a variable
// template for which we have not yet instantiated the initializer.
(getFirstDecl()->isOutOfLine()
? getTemplateSpecializationKind() == TSK_Undeclared
: getTemplateSpecializationKind() !=
TSK_ExplicitSpecialization) ||
isa<VarTemplatePartialSpecializationDecl>(this)))
return Definition;
else
return DeclarationOnly;
}
// C99 6.7p5:
// A definition of an identifier is a declaration for that identifier that
// [...] causes storage to be reserved for that object.
// Note: that applies for all non-file-scope objects.
// C99 6.9.2p1:
// If the declaration of an identifier for an object has file scope and an
  // initializer, the declaration is an external definition for the identifier.
if (hasInit())
return Definition;
if (hasAttr<AliasAttr>())
return Definition;
if (const auto *SAA = getAttr<SelectAnyAttr>())
if (!SAA->isInherited())
return Definition;
// A variable template specialization (other than a static data member
// template or an explicit specialization) is a declaration until we
// instantiate its initializer.
if (isa<VarTemplateSpecializationDecl>(this) &&
getTemplateSpecializationKind() != TSK_ExplicitSpecialization)
return DeclarationOnly;
  if (!getASTContext().getLangOpts().HLSL) // HLSL Change - treat extern as a definition to match fxc.
if (hasExternalStorage())
return DeclarationOnly;
// [dcl.link] p7:
// A declaration directly contained in a linkage-specification is treated
// as if it contains the extern specifier for the purpose of determining
// the linkage of the declared name and whether it is a definition.
if (isSingleLineLanguageLinkage(*this))
return DeclarationOnly;
// C99 6.9.2p2:
// A declaration of an object that has file scope without an initializer,
// and without a storage class specifier or the scs 'static', constitutes
// a tentative definition.
// No such thing in C++.
if (!C.getLangOpts().CPlusPlus && isFileVarDecl())
return TentativeDefinition;
// What's left is (in C, block-scope) declarations without initializers or
// external storage. These are definitions.
return Definition;
}
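/// For a tentative definition in C, return the declaration that acts as the
/// definition: the last tentative definition among the redeclarations.
/// Returns null if this declaration is not itself tentative or if some
/// redeclaration is an actual definition.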
VarDecl *VarDecl::getActingDefinition() {
DefinitionKind Kind = isThisDeclarationADefinition();
if (Kind != TentativeDefinition)
return nullptr;
VarDecl *LastTentative = nullptr;
VarDecl *First = getFirstDecl();
for (auto I : First->redecls()) {
Kind = I->isThisDeclarationADefinition();
if (Kind == Definition)
return nullptr;
else if (Kind == TentativeDefinition)
LastTentative = I;
}
return LastTentative;
}
VarDecl *VarDecl::getDefinition(ASTContext &C) {
VarDecl *First = getFirstDecl();
for (auto I : First->redecls()) {
if (I->isThisDeclarationADefinition(C) == Definition)
return I;
}
return nullptr;
}
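/// Report the strongest DefinitionKind found among all redeclarations,
/// stopping early once an actual definition is seen.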
VarDecl::DefinitionKind VarDecl::hasDefinition(ASTContext &C) const {
DefinitionKind Kind = DeclarationOnly;
const VarDecl *First = getFirstDecl();
for (auto I : First->redecls()) {
Kind = std::max(Kind, I->isThisDeclarationADefinition(C));
if (Kind == Definition)
break;
}
return Kind;
}
const Expr *VarDecl::getAnyInitializer(const VarDecl *&D) const {
for (auto I : redecls()) {
if (auto Expr = I->getInit()) {
D = I;
return Expr;
}
}
return nullptr;
}
bool VarDecl::isOutOfLine() const {
if (Decl::isOutOfLine())
return true;
if (!isStaticDataMember())
return false;
// If this static data member was instantiated from a static data member of
// a class template, check whether that static data member was defined
// out-of-line.
if (VarDecl *VD = getInstantiatedFromStaticDataMember())
return VD->isOutOfLine();
return false;
}
VarDecl *VarDecl::getOutOfLineDefinition() {
if (!isStaticDataMember())
return nullptr;
for (auto RD : redecls()) {
if (RD->getLexicalDeclContext()->isFileContext())
return RD;
}
return nullptr;
}
void VarDecl::setInit(Expr *I) {
if (EvaluatedStmt *Eval = Init.dyn_cast<EvaluatedStmt *>()) {
Eval->~EvaluatedStmt();
getASTContext().Deallocate(Eval);
}
Init = I;
}
bool VarDecl::isUsableInConstantExpressions(ASTContext &C) const {
const LangOptions &Lang = C.getLangOpts();
(void)(Lang); // HLSL Change - this object is only accessed through static consts
// HLSL Change: non-static globals are constant buffer variables that look
// like const globals, but cannot be used in constant expressions.
if (Lang.HLSL && hasGlobalStorage() && getStorageClass() != SC_Static)
return false;
if (!Lang.CPlusPlus)
return false;
// In C++11, any variable of reference type can be used in a constant
// expression if it is initialized by a constant expression.
if (Lang.CPlusPlus11 && getType()->isReferenceType())
return true;
// Only const objects can be used in constant expressions in C++. C++98 does
// not require the variable to be non-volatile, but we consider this to be a
// defect.
if (!getType().isConstQualified() || getType().isVolatileQualified())
return false;
// In C++, const, non-volatile variables of integral or enumeration types
// can be used in constant expressions.
if (getType()->isIntegralOrEnumerationType())
return true;
// Additionally, in C++11, non-volatile constexpr variables can be used in
// constant expressions.
return Lang.CPlusPlus11 && isConstexpr();
}
/// Convert the initializer for this declaration to the elaborated EvaluatedStmt
/// form, which contains extra information on the evaluated value of the
/// initializer.
EvaluatedStmt *VarDecl::ensureEvaluatedStmt() const {
EvaluatedStmt *Eval = Init.dyn_cast<EvaluatedStmt *>();
if (!Eval) {
Stmt *S = Init.get<Stmt *>();
// Note: EvaluatedStmt contains an APValue, which usually holds
// resources not allocated from the ASTContext. We need to do some
// work to avoid leaking those, but we do so in VarDecl::evaluateValue
// where we can detect whether there's anything to clean up or not.
Eval = new (getASTContext()) EvaluatedStmt;
Eval->Value = S;
Init = Eval;
}
return Eval;
}
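/// Evaluate the initializer, discarding any diagnostic notes it produces.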
APValue *VarDecl::evaluateValue() const {
SmallVector<PartialDiagnosticAt, 8> Notes;
return evaluateValue(Notes);
}
namespace {
// Destroy an APValue that was allocated in an ASTContext.
void DestroyAPValue(void* UntypedValue) {
static_cast<APValue*>(UntypedValue)->~APValue();
}
} // namespace
APValue *VarDecl::evaluateValue(
SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
EvaluatedStmt *Eval = ensureEvaluatedStmt();
// We only produce notes indicating why an initializer is non-constant the
// first time it is evaluated. FIXME: The notes won't always be emitted the
  // first time we try evaluation, so they might not be produced at all.
if (Eval->WasEvaluated)
return Eval->Evaluated.isUninit() ? nullptr : &Eval->Evaluated;
const Expr *Init = cast<Expr>(Eval->Value);
assert(!Init->isValueDependent());
if (Eval->IsEvaluating) {
// FIXME: Produce a diagnostic for self-initialization.
Eval->CheckedICE = true;
Eval->IsICE = false;
return nullptr;
}
Eval->IsEvaluating = true;
bool Result = Init->EvaluateAsInitializer(Eval->Evaluated, getASTContext(),
this, Notes);
// Ensure the computed APValue is cleaned up later if evaluation succeeded,
// or that it's empty (so that there's nothing to clean up) if evaluation
// failed.
if (!Result)
Eval->Evaluated = APValue();
else if (Eval->Evaluated.needsCleanup())
getASTContext().AddDeallocation(DestroyAPValue, &Eval->Evaluated);
Eval->IsEvaluating = false;
Eval->WasEvaluated = true;
// In C++11, we have determined whether the initializer was a constant
// expression as a side-effect.
if (getASTContext().getLangOpts().CPlusPlus11 && !Eval->CheckedICE) {
Eval->CheckedICE = true;
Eval->IsICE = Result && Notes.empty();
}
return Result ? &Eval->Evaluated : nullptr;
}
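/// Determine whether the initializer is an integral constant expression,
/// caching the answer on the EvaluatedStmt to avoid re-checking.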
bool VarDecl::checkInitIsICE() const {
// Initializers of weak variables are never ICEs.
if (isWeak())
return false;
EvaluatedStmt *Eval = ensureEvaluatedStmt();
if (Eval->CheckedICE)
// We have already checked whether this subexpression is an
// integral constant expression.
return Eval->IsICE;
const Expr *Init = cast<Expr>(Eval->Value);
assert(!Init->isValueDependent());
// In C++11, evaluate the initializer to check whether it's a constant
// expression.
if (getASTContext().getLangOpts().CPlusPlus11) {
SmallVector<PartialDiagnosticAt, 8> Notes;
evaluateValue(Notes);
return Eval->IsICE;
}
// It's an ICE whether or not the definition we found is
// out-of-line. See DR 721 and the discussion in Clang PR
// 6206 for details.
if (Eval->CheckingICE)
return false;
Eval->CheckingICE = true;
Eval->IsICE = Init->isIntegerConstantExpr(getASTContext());
Eval->CheckingICE = false;
Eval->CheckedICE = true;
return Eval->IsICE;
}
VarDecl *VarDecl::getInstantiatedFromStaticDataMember() const {
if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo())
return cast<VarDecl>(MSI->getInstantiatedFrom());
return nullptr;
}
TemplateSpecializationKind VarDecl::getTemplateSpecializationKind() const {
if (const VarTemplateSpecializationDecl *Spec =
dyn_cast<VarTemplateSpecializationDecl>(this))
return Spec->getSpecializationKind();
if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo())
return MSI->getTemplateSpecializationKind();
return TSK_Undeclared;
}
SourceLocation VarDecl::getPointOfInstantiation() const {
if (const VarTemplateSpecializationDecl *Spec =
dyn_cast<VarTemplateSpecializationDecl>(this))
return Spec->getPointOfInstantiation();
if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo())
return MSI->getPointOfInstantiation();
return SourceLocation();
}
VarTemplateDecl *VarDecl::getDescribedVarTemplate() const {
return getASTContext().getTemplateOrSpecializationInfo(this)
.dyn_cast<VarTemplateDecl *>();
}
void VarDecl::setDescribedVarTemplate(VarTemplateDecl *Template) {
getASTContext().setTemplateOrSpecializationInfo(this, Template);
}
MemberSpecializationInfo *VarDecl::getMemberSpecializationInfo() const {
if (isStaticDataMember())
// FIXME: Remove ?
// return getASTContext().getInstantiatedFromStaticDataMember(this);
return getASTContext().getTemplateOrSpecializationInfo(this)
.dyn_cast<MemberSpecializationInfo *>();
return nullptr;
}
void VarDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK,
SourceLocation PointOfInstantiation) {
assert((isa<VarTemplateSpecializationDecl>(this) ||
getMemberSpecializationInfo()) &&
"not a variable or static data member template specialization");
if (VarTemplateSpecializationDecl *Spec =
dyn_cast<VarTemplateSpecializationDecl>(this)) {
Spec->setSpecializationKind(TSK);
if (TSK != TSK_ExplicitSpecialization && PointOfInstantiation.isValid() &&
Spec->getPointOfInstantiation().isInvalid())
Spec->setPointOfInstantiation(PointOfInstantiation);
}
if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo()) {
MSI->setTemplateSpecializationKind(TSK);
if (TSK != TSK_ExplicitSpecialization && PointOfInstantiation.isValid() &&
MSI->getPointOfInstantiation().isInvalid())
MSI->setPointOfInstantiation(PointOfInstantiation);
}
}
void
VarDecl::setInstantiationOfStaticDataMember(VarDecl *VD,
TemplateSpecializationKind TSK) {
assert(getASTContext().getTemplateOrSpecializationInfo(this).isNull() &&
"Previous template or instantiation?");
getASTContext().setInstantiatedFromStaticDataMember(this, VD, TSK);
}
//===----------------------------------------------------------------------===//
// ParmVarDecl Implementation
//===----------------------------------------------------------------------===//
ParmVarDecl *ParmVarDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
QualType T, TypeSourceInfo *TInfo,
StorageClass S, Expr *DefArg,
hlsl::ParameterModifier ParamMod) {
return new (C, DC) ParmVarDecl(ParmVar, C, DC, StartLoc, IdLoc, Id, T, TInfo,
S, DefArg, ParamMod);
}
QualType ParmVarDecl::getOriginalType() const {
TypeSourceInfo *TSI = getTypeSourceInfo();
QualType T = TSI ? TSI->getType() : getType();
if (const DecayedType *DT = dyn_cast<DecayedType>(T))
return DT->getOriginalType();
return T;
}
ParmVarDecl *ParmVarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID)
ParmVarDecl(ParmVar, C, nullptr, SourceLocation(), SourceLocation(),
nullptr, QualType(), nullptr, SC_None, nullptr, hlsl::ParameterModifier());
}
SourceRange ParmVarDecl::getSourceRange() const {
if (!hasInheritedDefaultArg()) {
SourceRange ArgRange = getDefaultArgRange();
if (ArgRange.isValid())
return SourceRange(getOuterLocStart(), ArgRange.getEnd());
}
// DeclaratorDecl considers the range of postfix types as overlapping with the
// declaration name, but this is not the case with parameters in ObjC methods.
if (isa<ObjCMethodDecl>(getDeclContext()))
return SourceRange(DeclaratorDecl::getLocStart(), getLocation());
return DeclaratorDecl::getSourceRange();
}
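/// Return the fully parsed and instantiated default argument, unwrapping
/// any ExprWithCleanups node that wraps it.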
Expr *ParmVarDecl::getDefaultArg() {
assert(!hasUnparsedDefaultArg() && "Default argument is not yet parsed!");
assert(!hasUninstantiatedDefaultArg() &&
"Default argument is not yet instantiated!");
Expr *Arg = getInit();
if (ExprWithCleanups *E = dyn_cast_or_null<ExprWithCleanups>(Arg))
return E->getSubExpr();
return Arg;
}
SourceRange ParmVarDecl::getDefaultArgRange() const {
if (const Expr *E = getInit())
return E->getSourceRange();
if (hasUninstantiatedDefaultArg())
return getUninstantiatedDefaultArg()->getSourceRange();
return SourceRange();
}
bool ParmVarDecl::isParameterPack() const {
return isa<PackExpansionType>(getType());
}
void ParmVarDecl::setParameterIndexLarge(unsigned parameterIndex) {
getASTContext().setParameterIndex(this, parameterIndex);
ParmVarDeclBits.ParameterIndex = ParameterIndexSentinel;
}
unsigned ParmVarDecl::getParameterIndexLarge() const {
return getASTContext().getParameterIndex(this);
}
// HLSL Change Begins
void ParmVarDecl::updateOutParamToRefType(ASTContext &C) {
  // Aggregate types become indirect params and are converted to pointer
  // types, so don't update those to a ReferenceType.
if ((!getType()->isArrayType() && !getType()->isRecordType()) ||
hlsl::IsHLSLVecMatType(getType()))
setType(C.getLValueReferenceType(getType(), false));
// Add restrict to out param.
QualType QT = getType();
QT.addRestrict();
setType(QT);
}
// HLSL Change Ends
//===----------------------------------------------------------------------===//
// FunctionDecl Implementation
//===----------------------------------------------------------------------===//
void FunctionDecl::getNameForDiagnostic(
raw_ostream &OS, const PrintingPolicy &Policy, bool Qualified) const {
NamedDecl::getNameForDiagnostic(OS, Policy, Qualified);
const TemplateArgumentList *TemplateArgs = getTemplateSpecializationArgs();
if (TemplateArgs)
TemplateSpecializationType::PrintTemplateArgumentList(
OS, TemplateArgs->data(), TemplateArgs->size(), Policy);
}
bool FunctionDecl::isVariadic() const {
if (const FunctionProtoType *FT = getType()->getAs<FunctionProtoType>())
return FT->isVariadic();
return false;
}
bool FunctionDecl::hasBody(const FunctionDecl *&Definition) const {
for (auto I : redecls()) {
if (I->Body || I->IsLateTemplateParsed) {
Definition = I;
return true;
}
}
return false;
}
bool FunctionDecl::hasTrivialBody() const {
Stmt *S = getBody();
if (!S) {
// Since we don't have a body for this function, we don't know if it's
// trivial or not.
return false;
}
if (isa<CompoundStmt>(S) && cast<CompoundStmt>(S)->body_empty())
return true;
return false;
}
bool FunctionDecl::isDefined(const FunctionDecl *&Definition) const {
for (auto I : redecls()) {
if (I->IsDeleted || I->IsDefaulted || I->Body || I->IsLateTemplateParsed ||
I->hasAttr<AliasAttr>()) {
Definition = I->IsDeleted ? I->getCanonicalDecl() : I;
return true;
}
}
return false;
}
Stmt *FunctionDecl::getBody(const FunctionDecl *&Definition) const {
if (!hasBody(Definition))
return nullptr;
if (Definition->Body)
return Definition->Body.get(getASTContext().getExternalSource());
return nullptr;
}
void FunctionDecl::setBody(Stmt *B) {
Body = B;
if (B)
EndRangeLoc = B->getLocEnd();
}
void FunctionDecl::setPure(bool P) {
IsPure = P;
if (P)
if (CXXRecordDecl *Parent = dyn_cast<CXXRecordDecl>(getDeclContext()))
Parent->markedVirtualFunctionPure();
}
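// Helper: true if the declaration has an identifier that exactly matches the
// string literal Str.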
template<std::size_t Len>
static bool isNamed(const NamedDecl *ND, const char (&Str)[Len]) {
IdentifierInfo *II = ND->getIdentifier();
return II && II->isStr(Str);
}
bool FunctionDecl::isMain() const {
const TranslationUnitDecl *tunit =
dyn_cast<TranslationUnitDecl>(getDeclContext()->getRedeclContext());
return tunit &&
!tunit->getASTContext().getLangOpts().Freestanding &&
isNamed(this, "main");
}
bool FunctionDecl::isMSVCRTEntryPoint() const {
const TranslationUnitDecl *TUnit =
dyn_cast<TranslationUnitDecl>(getDeclContext()->getRedeclContext());
if (!TUnit)
return false;
// Even though we aren't really targeting MSVCRT if we are freestanding,
// semantic analysis for these functions remains the same.
// MSVCRT entry points only exist on MSVCRT targets.
if (!TUnit->getASTContext().getTargetInfo().getTriple().isOSMSVCRT())
return false;
// Nameless functions like constructors cannot be entry points.
if (!getIdentifier())
return false;
return llvm::StringSwitch<bool>(getName())
.Cases("main", // an ANSI console app
"wmain", // a Unicode console App
"WinMain", // an ANSI GUI app
"wWinMain", // a Unicode GUI app
"DllMain", // a DLL
true)
.Default(false);
}
bool FunctionDecl::isReservedGlobalPlacementOperator() const {
assert(getDeclName().getNameKind() == DeclarationName::CXXOperatorName);
assert(getDeclName().getCXXOverloadedOperator() == OO_New ||
getDeclName().getCXXOverloadedOperator() == OO_Delete ||
getDeclName().getCXXOverloadedOperator() == OO_Array_New ||
getDeclName().getCXXOverloadedOperator() == OO_Array_Delete);
if (!getDeclContext()->getRedeclContext()->isTranslationUnit())
return false;
const FunctionProtoType *proto = getType()->castAs<FunctionProtoType>();
if (proto->getNumParams() != 2 || proto->isVariadic())
return false;
ASTContext &Context =
cast<TranslationUnitDecl>(getDeclContext()->getRedeclContext())
->getASTContext();
// The result type and first argument type are constant across all
// these operators. The second argument must be exactly void*.
return (proto->getParamType(1).getCanonicalType() == Context.VoidPtrTy);
}
bool FunctionDecl::isReplaceableGlobalAllocationFunction() const {
if (getDeclName().getNameKind() != DeclarationName::CXXOperatorName)
return false;
if (getDeclName().getCXXOverloadedOperator() != OO_New &&
getDeclName().getCXXOverloadedOperator() != OO_Delete &&
getDeclName().getCXXOverloadedOperator() != OO_Array_New &&
getDeclName().getCXXOverloadedOperator() != OO_Array_Delete)
return false;
if (isa<CXXRecordDecl>(getDeclContext()))
return false;
// This can only fail for an invalid 'operator new' declaration.
if (!getDeclContext()->getRedeclContext()->isTranslationUnit())
return false;
const FunctionProtoType *FPT = getType()->castAs<FunctionProtoType>();
if (FPT->getNumParams() == 0 || FPT->getNumParams() > 2 || FPT->isVariadic())
return false;
// If this is a single-parameter function, it must be a replaceable global
// allocation or deallocation function.
if (FPT->getNumParams() == 1)
return true;
// Otherwise, we're looking for a second parameter whose type is
// 'const std::nothrow_t &', or, in C++1y, 'std::size_t'.
QualType Ty = FPT->getParamType(1);
ASTContext &Ctx = getASTContext();
if (Ctx.getLangOpts().SizedDeallocation &&
Ctx.hasSameType(Ty, Ctx.getSizeType()))
return true;
if (!Ty->isReferenceType())
return false;
Ty = Ty->getPointeeType();
if (Ty.getCVRQualifiers() != Qualifiers::Const)
return false;
const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
return RD && isNamed(RD, "nothrow_t") && RD->isInStdNamespace();
}
LanguageLinkage FunctionDecl::getLanguageLinkage() const {
return getDeclLanguageLinkage(*this);
}
bool FunctionDecl::isExternC() const {
return isDeclExternC(*this);
}
bool FunctionDecl::isInExternCContext() const {
return getLexicalDeclContext()->isExternCContext();
}
bool FunctionDecl::isInExternCXXContext() const {
return getLexicalDeclContext()->isExternCXXContext();
}
bool FunctionDecl::isGlobal() const {
if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(this))
return Method->isStatic();
if (getCanonicalDecl()->getStorageClass() == SC_Static)
return false;
for (const DeclContext *DC = getDeclContext();
DC->isNamespace();
DC = DC->getParent()) {
if (const NamespaceDecl *Namespace = cast<NamespaceDecl>(DC)) {
if (!Namespace->getDeclName())
return false;
break;
}
}
return true;
}
bool FunctionDecl::isNoReturn() const {
return hasAttr<NoReturnAttr>() || hasAttr<CXX11NoReturnAttr>() ||
hasAttr<C11NoReturnAttr>() ||
getType()->getAs<FunctionType>()->getNoReturnAttr();
}
void
FunctionDecl::setPreviousDeclaration(FunctionDecl *PrevDecl) {
redeclarable_base::setPreviousDecl(PrevDecl);
if (FunctionTemplateDecl *FunTmpl = getDescribedFunctionTemplate()) {
FunctionTemplateDecl *PrevFunTmpl
= PrevDecl? PrevDecl->getDescribedFunctionTemplate() : nullptr;
assert((!PrevDecl || PrevFunTmpl) && "Function/function template mismatch");
FunTmpl->setPreviousDecl(PrevFunTmpl);
}
if (PrevDecl && PrevDecl->IsInline)
IsInline = true;
}
FunctionDecl *FunctionDecl::getCanonicalDecl() { return getFirstDecl(); }
/// \brief Returns a value indicating whether this function
/// corresponds to a builtin function.
///
/// The function corresponds to a built-in function if it is
/// declared at translation scope or within an extern "C" block and
/// its name matches with the name of a builtin. The returned value
/// will be 0 for functions that do not correspond to a builtin, a
/// value of type \c Builtin::ID if in the target-independent range
/// \c [1,Builtin::First), or a target-specific builtin value.
unsigned FunctionDecl::getBuiltinID() const {
if (!getIdentifier())
return 0;
unsigned BuiltinID = getIdentifier()->getBuiltinID();
if (!BuiltinID)
return 0;
ASTContext &Context = getASTContext();
if (Context.getLangOpts().CPlusPlus) {
const LinkageSpecDecl *LinkageDecl = dyn_cast<LinkageSpecDecl>(
getFirstDecl()->getDeclContext());
// In C++, the first declaration of a builtin is always inside an implicit
// extern "C".
// FIXME: A recognised library function may not be directly in an extern "C"
// declaration, for instance "extern "C" { namespace std { decl } }".
if (!LinkageDecl) {
if (BuiltinID == Builtin::BI__GetExceptionInfo &&
Context.getTargetInfo().getCXXABI().isMicrosoft() &&
isInStdNamespace())
return Builtin::BI__GetExceptionInfo;
return 0;
}
if (LinkageDecl->getLanguage() != LinkageSpecDecl::lang_c)
return 0;
}
// If the function is marked "overloadable", it has a different mangled name
// and is not the C library function.
if (hasAttr<OverloadableAttr>())
return 0;
if (!Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
return BuiltinID;
// This function has the name of a known C library
// function. Determine whether it actually refers to the C library
// function or whether it just has the same name.
// If this is a static function, it's not a builtin.
if (getStorageClass() == SC_Static)
return 0;
return BuiltinID;
}
/// getNumParams - Return the number of parameters this function must have
/// based on its FunctionType. This is the length of the ParamInfo array
/// after it has been created.
unsigned FunctionDecl::getNumParams() const {
const FunctionProtoType *FPT = getType()->getAs<FunctionProtoType>();
return FPT ? FPT->getNumParams() : 0;
}
void FunctionDecl::setParams(ASTContext &C,
ArrayRef<ParmVarDecl *> NewParamInfo) {
assert(!ParamInfo && "Already has param info!");
assert(NewParamInfo.size() == getNumParams() && "Parameter count mismatch!");
// Zero params -> null pointer.
if (!NewParamInfo.empty()) {
ParamInfo = new (C) ParmVarDecl*[NewParamInfo.size()];
std::copy(NewParamInfo.begin(), NewParamInfo.end(), ParamInfo);
}
}
void FunctionDecl::setDeclsInPrototypeScope(ArrayRef<NamedDecl *> NewDecls) {
assert(DeclsInPrototypeScope.empty() && "Already has prototype decls!");
if (!NewDecls.empty()) {
NamedDecl **A = new (getASTContext()) NamedDecl*[NewDecls.size()];
std::copy(NewDecls.begin(), NewDecls.end(), A);
DeclsInPrototypeScope = llvm::makeArrayRef(A, NewDecls.size());
// Move declarations introduced in prototype to the function context.
for (auto I : NewDecls) {
DeclContext *DC = I->getDeclContext();
      // A forward-declared reference to an enumeration is not added to the
      // declaration scope, so skip any declaration that is absent from its
      // declaration context.
if (DC->containsDecl(I)) {
DC->removeDecl(I);
I->setDeclContext(this);
addDecl(I);
}
}
}
}
/// getMinRequiredArguments - Returns the minimum number of arguments
/// needed to call this function. This may be fewer than the number of
/// function parameters, if some of the parameters have default
/// arguments (in C++) or are parameter packs (C++11).
unsigned FunctionDecl::getMinRequiredArguments() const {
if (!getASTContext().getLangOpts().CPlusPlus)
return getNumParams();
unsigned NumRequiredArgs = 0;
for (auto *Param : params())
if (!Param->isParameterPack() && !Param->hasDefaultArg())
++NumRequiredArgs;
return NumRequiredArgs;
}
/// \brief The combination of the extern and inline keywords under MSVC forces
/// the function to be required, i.e. its definition must be emitted.
///
/// Note: This function assumes that we will only get called when isInlined()
/// would return true for this FunctionDecl.
bool FunctionDecl::isMSExternInline() const {
assert(isInlined() && "expected to get called on an inlined function!");
const ASTContext &Context = getASTContext();
if (!Context.getLangOpts().MSVCCompat && !hasAttr<DLLExportAttr>())
return false;
for (const FunctionDecl *FD = getMostRecentDecl(); FD;
FD = FD->getPreviousDecl())
if (!FD->isImplicit() && FD->getStorageClass() == SC_Extern)
return true;
return false;
}
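// Under MSVC semantics, the first explicit 'extern' redeclaration of an
// inline function forces the definition to be emitted; subsequent 'extern'
// redeclarations do not force it again.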
static bool redeclForcesDefMSVC(const FunctionDecl *Redecl) {
if (Redecl->getStorageClass() != SC_Extern)
return false;
for (const FunctionDecl *FD = Redecl->getPreviousDecl(); FD;
FD = FD->getPreviousDecl())
if (!FD->isImplicit() && FD->getStorageClass() == SC_Extern)
return false;
return true;
}
static bool RedeclForcesDefC99(const FunctionDecl *Redecl) {
// Only consider file-scope declarations in this test.
if (!Redecl->getLexicalDeclContext()->isTranslationUnit())
return false;
// Only consider explicit declarations; the presence of a builtin for a
// libcall shouldn't affect whether a definition is externally visible.
if (Redecl->isImplicit())
return false;
if (!Redecl->isInlineSpecified() || Redecl->getStorageClass() == SC_Extern)
return true; // Not an inline definition
return false;
}
/// \brief For a function declaration in C or C++, determine whether this
/// declaration causes the definition to be externally visible.
///
/// For instance, this determines if adding the current declaration to the set
/// of redeclarations of the given functions causes
/// isInlineDefinitionExternallyVisible to change from false to true.
bool FunctionDecl::doesDeclarationForceExternallyVisibleDefinition() const {
assert(!doesThisDeclarationHaveABody() &&
"Must have a declaration without a body.");
ASTContext &Context = getASTContext();
if (Context.getLangOpts().MSVCCompat) {
const FunctionDecl *Definition;
if (hasBody(Definition) && Definition->isInlined() &&
redeclForcesDefMSVC(this))
return true;
}
if (Context.getLangOpts().GNUInline || hasAttr<GNUInlineAttr>()) {
    // With GNU inlining, a declaration with 'inline' but not 'extern' forces
// an externally visible definition.
//
// FIXME: What happens if gnu_inline gets added on after the first
// declaration?
if (!isInlineSpecified() || getStorageClass() == SC_Extern)
return false;
const FunctionDecl *Prev = this;
bool FoundBody = false;
while ((Prev = Prev->getPreviousDecl())) {
FoundBody |= Prev->Body.isValid();
if (Prev->Body) {
// If it's not the case that both 'inline' and 'extern' are
// specified on the definition, then it is always externally visible.
if (!Prev->isInlineSpecified() ||
Prev->getStorageClass() != SC_Extern)
return false;
} else if (Prev->isInlineSpecified() &&
Prev->getStorageClass() != SC_Extern) {
return false;
}
}
return FoundBody;
}
if (Context.getLangOpts().CPlusPlus)
return false;
// C99 6.7.4p6:
// [...] If all of the file scope declarations for a function in a
// translation unit include the inline function specifier without extern,
// then the definition in that translation unit is an inline definition.
if (isInlineSpecified() && getStorageClass() != SC_Extern)
return false;
const FunctionDecl *Prev = this;
bool FoundBody = false;
while ((Prev = Prev->getPreviousDecl())) {
FoundBody |= Prev->Body.isValid();
if (RedeclForcesDefC99(Prev))
return false;
}
return FoundBody;
}
SourceRange FunctionDecl::getReturnTypeSourceRange() const {
const TypeSourceInfo *TSI = getTypeSourceInfo();
if (!TSI)
return SourceRange();
FunctionTypeLoc FTL =
TSI->getTypeLoc().IgnoreParens().getAs<FunctionTypeLoc>();
if (!FTL)
return SourceRange();
// Skip self-referential return types.
const SourceManager &SM = getASTContext().getSourceManager();
SourceRange RTRange = FTL.getReturnLoc().getSourceRange();
SourceLocation Boundary = getNameInfo().getLocStart();
if (RTRange.isInvalid() || Boundary.isInvalid() ||
!SM.isBeforeInTranslationUnit(RTRange.getEnd(), Boundary))
return SourceRange();
return RTRange;
}
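/// Whether calls to this function should warn when the result is unused:
/// either the function itself carries warn_unused_result, or the class type
/// it returns does (methods with a corresponding method in the returned
/// class are excluded).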
bool FunctionDecl::hasUnusedResultAttr() const {
QualType RetType = getReturnType();
if (RetType->isRecordType()) {
const CXXRecordDecl *Ret = RetType->getAsCXXRecordDecl();
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(this);
if (Ret && Ret->hasAttr<WarnUnusedResultAttr>() &&
!(MD && MD->getCorrespondingMethodInClass(Ret, true)))
return true;
}
return hasAttr<WarnUnusedResultAttr>();
}
/// \brief For an inline function definition in C, or for a gnu_inline function
/// in C++, determine whether the definition will be externally visible.
///
/// Inline function definitions are always available for inlining optimizations.
/// However, depending on the language dialect, declaration specifiers, and
/// attributes, the definition of an inline function may or may not be
/// "externally" visible to other translation units in the program.
///
/// In C99, inline definitions are not externally visible by default. However,
/// if even one of the global-scope declarations is marked "extern inline", the
/// inline definition becomes externally visible (C99 6.7.4p6).
///
/// In GNU89 mode, or if the gnu_inline attribute is attached to the function
/// definition, we use the GNU semantics for inline, which are nearly the
/// opposite of C99 semantics. In particular, "inline" by itself will create
/// an externally visible symbol, but "extern inline" will not create an
/// externally visible symbol.
bool FunctionDecl::isInlineDefinitionExternallyVisible() const {
assert(doesThisDeclarationHaveABody() && "Must have the function definition");
assert(isInlined() && "Function must be inline");
ASTContext &Context = getASTContext();
if (Context.getLangOpts().GNUInline || hasAttr<GNUInlineAttr>()) {
// Note: If you change the logic here, please change
// doesDeclarationForceExternallyVisibleDefinition as well.
//
// If it's not the case that both 'inline' and 'extern' are
// specified on the definition, then this inline definition is
// externally visible.
if (!(isInlineSpecified() && getStorageClass() == SC_Extern))
return true;
// If any declaration is 'inline' but not 'extern', then this definition
// is externally visible.
for (auto Redecl : redecls()) {
if (Redecl->isInlineSpecified() &&
Redecl->getStorageClass() != SC_Extern)
return true;
}
return false;
}
// The rest of this function is C-only.
assert(!Context.getLangOpts().CPlusPlus &&
"should not use C inline rules in C++");
// C99 6.7.4p6:
// [...] If all of the file scope declarations for a function in a
// translation unit include the inline function specifier without extern,
// then the definition in that translation unit is an inline definition.
for (auto Redecl : redecls()) {
if (RedeclForcesDefC99(Redecl))
return true;
}
// C99 6.7.4p6:
// An inline definition does not provide an external definition for the
// function, and does not forbid an external definition in another
// translation unit.
return false;
}
/// getOverloadedOperator - Which C++ overloaded operator this
/// function represents, if any.
OverloadedOperatorKind FunctionDecl::getOverloadedOperator() const {
if (getDeclName().getNameKind() == DeclarationName::CXXOperatorName)
return getDeclName().getCXXOverloadedOperator();
else
return OO_None;
}
/// getLiteralIdentifier - The literal suffix identifier this function
/// represents, if any.
const IdentifierInfo *FunctionDecl::getLiteralIdentifier() const {
if (getDeclName().getNameKind() == DeclarationName::CXXLiteralOperatorName)
return getDeclName().getCXXLiteralIdentifier();
else
return nullptr;
}
FunctionDecl::TemplatedKind FunctionDecl::getTemplatedKind() const {
if (TemplateOrSpecialization.isNull())
return TK_NonTemplate;
if (TemplateOrSpecialization.is<FunctionTemplateDecl *>())
return TK_FunctionTemplate;
if (TemplateOrSpecialization.is<MemberSpecializationInfo *>())
return TK_MemberSpecialization;
if (TemplateOrSpecialization.is<FunctionTemplateSpecializationInfo *>())
return TK_FunctionTemplateSpecialization;
if (TemplateOrSpecialization.is
<DependentFunctionTemplateSpecializationInfo*>())
return TK_DependentFunctionTemplateSpecialization;
llvm_unreachable("Did we miss a TemplateOrSpecialization type?");
}
FunctionDecl *FunctionDecl::getInstantiatedFromMemberFunction() const {
if (MemberSpecializationInfo *Info = getMemberSpecializationInfo())
return cast<FunctionDecl>(Info->getInstantiatedFrom());
return nullptr;
}
void
FunctionDecl::setInstantiationOfMemberFunction(ASTContext &C,
FunctionDecl *FD,
TemplateSpecializationKind TSK) {
assert(TemplateOrSpecialization.isNull() &&
"Member function is already a specialization");
MemberSpecializationInfo *Info
= new (C) MemberSpecializationInfo(FD, TSK);
TemplateOrSpecialization = Info;
}
bool FunctionDecl::isImplicitlyInstantiable() const {
// If the function is invalid, it can't be implicitly instantiated.
if (isInvalidDecl())
return false;
switch (getTemplateSpecializationKind()) {
case TSK_Undeclared:
case TSK_ExplicitInstantiationDefinition:
return false;
case TSK_ImplicitInstantiation:
return true;
// It is possible to instantiate TSK_ExplicitSpecialization kind
// if the FunctionDecl has a class scope specialization pattern.
case TSK_ExplicitSpecialization:
return getClassScopeSpecializationPattern() != nullptr;
case TSK_ExplicitInstantiationDeclaration:
// Handled below.
break;
}
// Find the actual template from which we will instantiate.
const FunctionDecl *PatternDecl = getTemplateInstantiationPattern();
bool HasPattern = false;
if (PatternDecl)
HasPattern = PatternDecl->hasBody(PatternDecl);
// C++0x [temp.explicit]p9:
// Except for inline functions, other explicit instantiation declarations
// have the effect of suppressing the implicit instantiation of the entity
// to which they refer.
if (!HasPattern || !PatternDecl)
return true;
return PatternDecl->isInlined();
}
bool FunctionDecl::isTemplateInstantiation() const {
switch (getTemplateSpecializationKind()) {
case TSK_Undeclared:
case TSK_ExplicitSpecialization:
return false;
case TSK_ImplicitInstantiation:
case TSK_ExplicitInstantiationDeclaration:
case TSK_ExplicitInstantiationDefinition:
return true;
}
llvm_unreachable("All TSK values handled.");
}
FunctionDecl *FunctionDecl::getTemplateInstantiationPattern() const {
// Handle class scope explicit specialization special case.
if (getTemplateSpecializationKind() == TSK_ExplicitSpecialization)
return getClassScopeSpecializationPattern();
// If this is a generic lambda call operator specialization, its
// instantiation pattern is always its primary template's pattern
// even if its primary template was instantiated from another
// member template (which happens with nested generic lambdas).
// Since a lambda's call operator's body is transformed eagerly,
// we don't have to go hunting for a prototype definition template
// (i.e. instantiated-from-member-template) to use as an instantiation
// pattern.
if (isGenericLambdaCallOperatorSpecialization(
dyn_cast<CXXMethodDecl>(this))) {
assert(getPrimaryTemplate() && "A generic lambda specialization must be "
"generated from a primary call operator "
"template");
assert(getPrimaryTemplate()->getTemplatedDecl()->getBody() &&
"A generic lambda call operator template must always have a body - "
"even if instantiated from a prototype (i.e. as written) member "
"template");
return getPrimaryTemplate()->getTemplatedDecl();
}
if (FunctionTemplateDecl *Primary = getPrimaryTemplate()) {
while (Primary->getInstantiatedFromMemberTemplate()) {
// If we have hit a point where the user provided a specialization of
// this template, we're done looking.
if (Primary->isMemberSpecialization())
break;
Primary = Primary->getInstantiatedFromMemberTemplate();
}
return Primary->getTemplatedDecl();
}
return getInstantiatedFromMemberFunction();
}
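/// Retrieve the function template from which this function was specialized,
/// if this is a function template specialization.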
FunctionTemplateDecl *FunctionDecl::getPrimaryTemplate() const {
if (FunctionTemplateSpecializationInfo *Info
= TemplateOrSpecialization
.dyn_cast<FunctionTemplateSpecializationInfo*>()) {
return Info->Template.getPointer();
}
return nullptr;
}
FunctionDecl *FunctionDecl::getClassScopeSpecializationPattern() const {
return getASTContext().getClassScopeSpecializationPattern(this);
}
const TemplateArgumentList *
FunctionDecl::getTemplateSpecializationArgs() const {
if (FunctionTemplateSpecializationInfo *Info
= TemplateOrSpecialization
.dyn_cast<FunctionTemplateSpecializationInfo*>()) {
return Info->TemplateArguments;
}
return nullptr;
}
const ASTTemplateArgumentListInfo *
FunctionDecl::getTemplateSpecializationArgsAsWritten() const {
if (FunctionTemplateSpecializationInfo *Info
= TemplateOrSpecialization
.dyn_cast<FunctionTemplateSpecializationInfo*>()) {
return Info->TemplateArgumentsAsWritten;
}
return nullptr;
}
void
FunctionDecl::setFunctionTemplateSpecialization(ASTContext &C,
FunctionTemplateDecl *Template,
const TemplateArgumentList *TemplateArgs,
void *InsertPos,
TemplateSpecializationKind TSK,
const TemplateArgumentListInfo *TemplateArgsAsWritten,
SourceLocation PointOfInstantiation) {
assert(TSK != TSK_Undeclared &&
"Must specify the type of function template specialization");
FunctionTemplateSpecializationInfo *Info
= TemplateOrSpecialization.dyn_cast<FunctionTemplateSpecializationInfo*>();
if (!Info)
Info = FunctionTemplateSpecializationInfo::Create(C, this, Template, TSK,
TemplateArgs,
TemplateArgsAsWritten,
PointOfInstantiation);
TemplateOrSpecialization = Info;
Template->addSpecialization(Info, InsertPos);
}
void
FunctionDecl::setDependentTemplateSpecialization(ASTContext &Context,
const UnresolvedSetImpl &Templates,
const TemplateArgumentListInfo &TemplateArgs) {
assert(TemplateOrSpecialization.isNull());
size_t Size = sizeof(DependentFunctionTemplateSpecializationInfo);
Size += Templates.size() * sizeof(FunctionTemplateDecl*);
Size += TemplateArgs.size() * sizeof(TemplateArgumentLoc);
void *Buffer = Context.Allocate(Size);
DependentFunctionTemplateSpecializationInfo *Info =
new (Buffer) DependentFunctionTemplateSpecializationInfo(Templates,
TemplateArgs);
TemplateOrSpecialization = Info;
}
DependentFunctionTemplateSpecializationInfo::
DependentFunctionTemplateSpecializationInfo(const UnresolvedSetImpl &Ts,
const TemplateArgumentListInfo &TArgs)
: AngleLocs(TArgs.getLAngleLoc(), TArgs.getRAngleLoc()) {
static_assert(sizeof(*this) % llvm::AlignOf<void *>::Alignment == 0,
"Trailing data is unaligned!");
d.NumTemplates = Ts.size();
d.NumArgs = TArgs.size();
FunctionTemplateDecl **TsArray =
const_cast<FunctionTemplateDecl**>(getTemplates());
for (unsigned I = 0, E = Ts.size(); I != E; ++I)
TsArray[I] = cast<FunctionTemplateDecl>(Ts[I]->getUnderlyingDecl());
TemplateArgumentLoc *ArgsArray =
const_cast<TemplateArgumentLoc*>(getTemplateArgs());
for (unsigned I = 0, E = TArgs.size(); I != E; ++I)
new (&ArgsArray[I]) TemplateArgumentLoc(TArgs[I]);
}
TemplateSpecializationKind FunctionDecl::getTemplateSpecializationKind() const {
// For a function template specialization, query the specialization
// information object.
FunctionTemplateSpecializationInfo *FTSInfo
= TemplateOrSpecialization.dyn_cast<FunctionTemplateSpecializationInfo*>();
if (FTSInfo)
return FTSInfo->getTemplateSpecializationKind();
MemberSpecializationInfo *MSInfo
= TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo*>();
if (MSInfo)
return MSInfo->getTemplateSpecializationKind();
return TSK_Undeclared;
}
void
FunctionDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK,
SourceLocation PointOfInstantiation) {
if (FunctionTemplateSpecializationInfo *FTSInfo
= TemplateOrSpecialization.dyn_cast<
FunctionTemplateSpecializationInfo*>()) {
FTSInfo->setTemplateSpecializationKind(TSK);
if (TSK != TSK_ExplicitSpecialization &&
PointOfInstantiation.isValid() &&
FTSInfo->getPointOfInstantiation().isInvalid())
FTSInfo->setPointOfInstantiation(PointOfInstantiation);
} else if (MemberSpecializationInfo *MSInfo
= TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo*>()) {
MSInfo->setTemplateSpecializationKind(TSK);
if (TSK != TSK_ExplicitSpecialization &&
PointOfInstantiation.isValid() &&
MSInfo->getPointOfInstantiation().isInvalid())
MSInfo->setPointOfInstantiation(PointOfInstantiation);
} else
llvm_unreachable("Function cannot have a template specialization kind");
}
SourceLocation FunctionDecl::getPointOfInstantiation() const {
if (FunctionTemplateSpecializationInfo *FTSInfo
= TemplateOrSpecialization.dyn_cast<
FunctionTemplateSpecializationInfo*>())
return FTSInfo->getPointOfInstantiation();
else if (MemberSpecializationInfo *MSInfo
= TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo*>())
return MSInfo->getPointOfInstantiation();
return SourceLocation();
}
bool FunctionDecl::isOutOfLine() const {
if (Decl::isOutOfLine())
return true;
// If this function was instantiated from a member function of a
// class template, check whether that member function was defined out-of-line.
if (FunctionDecl *FD = getInstantiatedFromMemberFunction()) {
const FunctionDecl *Definition;
if (FD->hasBody(Definition))
return Definition->isOutOfLine();
}
// If this function was instantiated from a function template,
// check whether that function template was defined out-of-line.
if (FunctionTemplateDecl *FunTmpl = getPrimaryTemplate()) {
const FunctionDecl *Definition;
if (FunTmpl->getTemplatedDecl()->hasBody(Definition))
return Definition->isOutOfLine();
}
return false;
}
SourceRange FunctionDecl::getSourceRange() const {
return SourceRange(getOuterLocStart(), EndRangeLoc);
}
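/// Map this function to the canonical builtin ID of the well-known memory or
/// string function it names (memset, memcpy, strlen, ...), or return 0 if it
/// is none of them. Both the __builtin_ spellings and the plain extern "C"
/// library names map to the same BI* value.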
unsigned FunctionDecl::getMemoryFunctionKind() const {
IdentifierInfo *FnInfo = getIdentifier();
if (!FnInfo)
return 0;
// Builtin handling.
switch (getBuiltinID()) {
case Builtin::BI__builtin_memset:
case Builtin::BI__builtin___memset_chk:
case Builtin::BImemset:
return Builtin::BImemset;
case Builtin::BI__builtin_memcpy:
case Builtin::BI__builtin___memcpy_chk:
case Builtin::BImemcpy:
return Builtin::BImemcpy;
case Builtin::BI__builtin_memmove:
case Builtin::BI__builtin___memmove_chk:
case Builtin::BImemmove:
return Builtin::BImemmove;
case Builtin::BIstrlcpy:
case Builtin::BI__builtin___strlcpy_chk:
return Builtin::BIstrlcpy;
case Builtin::BIstrlcat:
case Builtin::BI__builtin___strlcat_chk:
return Builtin::BIstrlcat;
case Builtin::BI__builtin_memcmp:
case Builtin::BImemcmp:
return Builtin::BImemcmp;
case Builtin::BI__builtin_strncpy:
case Builtin::BI__builtin___strncpy_chk:
case Builtin::BIstrncpy:
return Builtin::BIstrncpy;
case Builtin::BI__builtin_strncmp:
case Builtin::BIstrncmp:
return Builtin::BIstrncmp;
case Builtin::BI__builtin_strncasecmp:
case Builtin::BIstrncasecmp:
return Builtin::BIstrncasecmp;
case Builtin::BI__builtin_strncat:
case Builtin::BI__builtin___strncat_chk:
case Builtin::BIstrncat:
return Builtin::BIstrncat;
case Builtin::BI__builtin_strndup:
case Builtin::BIstrndup:
return Builtin::BIstrndup;
case Builtin::BI__builtin_strlen:
case Builtin::BIstrlen:
return Builtin::BIstrlen;
default:
if (isExternC()) {
if (FnInfo->isStr("memset"))
return Builtin::BImemset;
else if (FnInfo->isStr("memcpy"))
return Builtin::BImemcpy;
else if (FnInfo->isStr("memmove"))
return Builtin::BImemmove;
else if (FnInfo->isStr("memcmp"))
return Builtin::BImemcmp;
else if (FnInfo->isStr("strncpy"))
return Builtin::BIstrncpy;
else if (FnInfo->isStr("strncmp"))
return Builtin::BIstrncmp;
else if (FnInfo->isStr("strncasecmp"))
return Builtin::BIstrncasecmp;
else if (FnInfo->isStr("strncat"))
return Builtin::BIstrncat;
else if (FnInfo->isStr("strndup"))
return Builtin::BIstrndup;
else if (FnInfo->isStr("strlen"))
return Builtin::BIstrlen;
}
break;
}
return 0;
}
//===----------------------------------------------------------------------===//
// FieldDecl Implementation
//===----------------------------------------------------------------------===//
FieldDecl *FieldDecl::Create(const ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, QualType T,
TypeSourceInfo *TInfo, Expr *BW, bool Mutable,
InClassInitStyle InitStyle) {
return new (C, DC) FieldDecl(Decl::Field, DC, StartLoc, IdLoc, Id, T, TInfo,
BW, Mutable, InitStyle);
}
FieldDecl *FieldDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) FieldDecl(Field, nullptr, SourceLocation(),
SourceLocation(), nullptr, QualType(), nullptr,
nullptr, false, ICIS_NoInit);
}
bool FieldDecl::isAnonymousStructOrUnion() const {
if (!isImplicit() || getDeclName())
return false;
if (const RecordType *Record = getType()->getAs<RecordType>())
return Record->getDecl()->isAnonymousStructOrUnion();
return false;
}
unsigned FieldDecl::getBitWidthValue(const ASTContext &Ctx) const {
assert(isBitField() && "not a bitfield");
Expr *BitWidth = static_cast<Expr *>(InitStorage.getPointer());
return BitWidth->EvaluateKnownConstInt(Ctx).getZExtValue();
}
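/// Return the zero-based index of this field within its parent record.
/// Indices are computed for all fields in one pass and cached as index+1,
/// so a cached value of 0 means "not yet computed".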
unsigned FieldDecl::getFieldIndex() const {
const FieldDecl *Canonical = getCanonicalDecl();
if (Canonical != this)
return Canonical->getFieldIndex();
if (CachedFieldIndex) return CachedFieldIndex - 1;
unsigned Index = 0;
const RecordDecl *RD = getParent();
for (auto *Field : RD->fields()) {
Field->getCanonicalDecl()->CachedFieldIndex = Index + 1;
++Index;
}
assert(CachedFieldIndex && "failed to find field in parent");
return CachedFieldIndex - 1;
}
SourceRange FieldDecl::getSourceRange() const {
switch (InitStorage.getInt()) {
// All three of these cases store an optional Expr*.
case ISK_BitWidthOrNothing:
case ISK_InClassCopyInit:
case ISK_InClassListInit:
if (const Expr *E = static_cast<const Expr *>(InitStorage.getPointer()))
return SourceRange(getInnerLocStart(), E->getLocEnd());
LLVM_FALLTHROUGH; // HLSL Change
case ISK_CapturedVLAType:
return DeclaratorDecl::getSourceRange();
}
llvm_unreachable("bad init storage kind");
}
void FieldDecl::setCapturedVLAType(const VariableArrayType *VLAType) {
assert((getParent()->isLambda() || getParent()->isCapturedRecord()) &&
"capturing type in non-lambda or captured record.");
assert(InitStorage.getInt() == ISK_BitWidthOrNothing &&
InitStorage.getPointer() == nullptr &&
"bit width, initializer or captured type already set");
InitStorage.setPointerAndInt(const_cast<VariableArrayType *>(VLAType),
ISK_CapturedVLAType);
}
//===----------------------------------------------------------------------===//
// TagDecl Implementation
//===----------------------------------------------------------------------===//
SourceLocation TagDecl::getOuterLocStart() const {
return getTemplateOrInnerLocStart(this);
}
SourceRange TagDecl::getSourceRange() const {
SourceLocation E = RBraceLoc.isValid() ? RBraceLoc : getLocation();
return SourceRange(getOuterLocStart(), E);
}
TagDecl *TagDecl::getCanonicalDecl() { return getFirstDecl(); }
void TagDecl::setTypedefNameForAnonDecl(TypedefNameDecl *TDD) {
NamedDeclOrQualifier = TDD;
if (const Type *T = getTypeForDecl()) {
(void)T;
assert(T->isLinkageValid());
}
assert(isLinkageValid());
}
void TagDecl::startDefinition() {
IsBeingDefined = true;
if (CXXRecordDecl *D = dyn_cast<CXXRecordDecl>(this)) {
struct CXXRecordDecl::DefinitionData *Data =
new (getASTContext()) struct CXXRecordDecl::DefinitionData(D);
for (auto I : redecls())
cast<CXXRecordDecl>(I)->DefinitionData = Data;
}
}
void TagDecl::completeDefinition() {
assert((!isa<CXXRecordDecl>(this) ||
cast<CXXRecordDecl>(this)->hasDefinition()) &&
"definition completed but not started");
IsCompleteDefinition = true;
IsBeingDefined = false;
if (ASTMutationListener *L = getASTMutationListener())
L->CompletedTagDefinition(this);
}
TagDecl *TagDecl::getDefinition() const {
if (isCompleteDefinition())
return const_cast<TagDecl *>(this);
// If it's possible for us to have an out-of-date definition, check now.
if (MayHaveOutOfDateDef) {
if (IdentifierInfo *II = getIdentifier()) {
if (II->isOutOfDate()) {
updateOutOfDate(*II);
}
}
}
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(this))
return CXXRD->getDefinition();
for (auto R : redecls())
if (R->isCompleteDefinition())
return R;
return nullptr;
}
void TagDecl::setQualifierInfo(NestedNameSpecifierLoc QualifierLoc) {
if (QualifierLoc) {
// Make sure the extended qualifier info is allocated.
if (!hasExtInfo())
NamedDeclOrQualifier = new (getASTContext()) ExtInfo;
// Set qualifier info.
getExtInfo()->QualifierLoc = QualifierLoc;
} else {
// Here Qualifier == 0, i.e., we are removing the qualifier (if any).
if (hasExtInfo()) {
if (getExtInfo()->NumTemplParamLists == 0) {
getASTContext().Deallocate(getExtInfo());
NamedDeclOrQualifier = (TypedefNameDecl*)nullptr;
}
else
getExtInfo()->QualifierLoc = QualifierLoc;
}
}
}
void TagDecl::setTemplateParameterListsInfo(ASTContext &Context,
unsigned NumTPLists,
TemplateParameterList **TPLists) {
assert(NumTPLists > 0);
// Make sure the extended decl info is allocated.
if (!hasExtInfo())
// Allocate external info struct.
NamedDeclOrQualifier = new (getASTContext()) ExtInfo;
// Set the template parameter lists info.
getExtInfo()->setTemplateParameterListsInfo(Context, NumTPLists, TPLists);
}
//===----------------------------------------------------------------------===//
// EnumDecl Implementation
//===----------------------------------------------------------------------===//
void EnumDecl::anchor() { }
EnumDecl *EnumDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id,
EnumDecl *PrevDecl, bool IsScoped,
bool IsScopedUsingClassTag, bool IsFixed) {
EnumDecl *Enum = new (C, DC) EnumDecl(C, DC, StartLoc, IdLoc, Id, PrevDecl,
IsScoped, IsScopedUsingClassTag,
IsFixed);
Enum->MayHaveOutOfDateDef = C.getLangOpts().Modules;
C.getTypeDeclType(Enum, PrevDecl);
return Enum;
}
EnumDecl *EnumDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
EnumDecl *Enum =
new (C, ID) EnumDecl(C, nullptr, SourceLocation(), SourceLocation(),
nullptr, nullptr, false, false, false);
Enum->MayHaveOutOfDateDef = C.getLangOpts().Modules;
return Enum;
}
SourceRange EnumDecl::getIntegerTypeRange() const {
if (const TypeSourceInfo *TI = getIntegerTypeSourceInfo())
return TI->getTypeLoc().getSourceRange();
return SourceRange();
}
void EnumDecl::completeDefinition(QualType NewType,
QualType NewPromotionType,
unsigned NumPositiveBits,
unsigned NumNegativeBits) {
assert(!isCompleteDefinition() && "Cannot redefine enums!");
if (!IntegerType)
IntegerType = NewType.getTypePtr();
PromotionType = NewPromotionType;
setNumPositiveBits(NumPositiveBits);
setNumNegativeBits(NumNegativeBits);
TagDecl::completeDefinition();
}
TemplateSpecializationKind EnumDecl::getTemplateSpecializationKind() const {
if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo())
return MSI->getTemplateSpecializationKind();
return TSK_Undeclared;
}
void EnumDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK,
SourceLocation PointOfInstantiation) {
MemberSpecializationInfo *MSI = getMemberSpecializationInfo();
assert(MSI && "Not an instantiated member enumeration?");
MSI->setTemplateSpecializationKind(TSK);
if (TSK != TSK_ExplicitSpecialization &&
PointOfInstantiation.isValid() &&
MSI->getPointOfInstantiation().isInvalid())
MSI->setPointOfInstantiation(PointOfInstantiation);
}
EnumDecl *EnumDecl::getInstantiatedFromMemberEnum() const {
if (SpecializationInfo)
return cast<EnumDecl>(SpecializationInfo->getInstantiatedFrom());
return nullptr;
}
void EnumDecl::setInstantiationOfMemberEnum(ASTContext &C, EnumDecl *ED,
TemplateSpecializationKind TSK) {
assert(!SpecializationInfo && "Member enum is already a specialization");
SpecializationInfo = new (C) MemberSpecializationInfo(ED, TSK);
}
//===----------------------------------------------------------------------===//
// RecordDecl Implementation
//===----------------------------------------------------------------------===//
RecordDecl::RecordDecl(Kind DK, TagKind TK, const ASTContext &C,
DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
RecordDecl *PrevDecl)
: TagDecl(DK, TK, C, DC, IdLoc, Id, PrevDecl, StartLoc) {
HasFlexibleArrayMember = false;
AnonymousStructOrUnion = false;
HasObjectMember = false;
HasVolatileMember = false;
LoadedFieldsFromExternalStorage = false;
assert(classof(static_cast<Decl*>(this)) && "Invalid Kind!");
}
RecordDecl *RecordDecl::Create(const ASTContext &C, TagKind TK, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, RecordDecl* PrevDecl) {
RecordDecl *R = new (C, DC) RecordDecl(Record, TK, C, DC,
StartLoc, IdLoc, Id, PrevDecl);
R->MayHaveOutOfDateDef = C.getLangOpts().Modules;
C.getTypeDeclType(R, PrevDecl);
return R;
}
RecordDecl *RecordDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
RecordDecl *R =
new (C, ID) RecordDecl(Record, TTK_Struct, C, nullptr, SourceLocation(),
SourceLocation(), nullptr, nullptr);
R->MayHaveOutOfDateDef = C.getLangOpts().Modules;
return R;
}
bool RecordDecl::isInjectedClassName() const {
return isImplicit() && getDeclName() && getDeclContext()->isRecord() &&
cast<RecordDecl>(getDeclContext())->getDeclName() == getDeclName();
}
bool RecordDecl::isLambda() const {
if (auto RD = dyn_cast<CXXRecordDecl>(this))
return RD->isLambda();
return false;
}
bool RecordDecl::isCapturedRecord() const {
return hasAttr<CapturedRecordAttr>();
}
void RecordDecl::setCapturedRecord() {
addAttr(CapturedRecordAttr::CreateImplicit(getASTContext()));
}
RecordDecl::field_iterator RecordDecl::field_begin() const {
if (hasExternalLexicalStorage() && !LoadedFieldsFromExternalStorage)
LoadFieldsFromExternalStorage();
return field_iterator(decl_iterator(FirstDecl));
}
/// completeDefinition - Notes that the definition of this type is now
/// complete.
void RecordDecl::completeDefinition() {
assert(!isCompleteDefinition() && "Cannot redefine record!");
TagDecl::completeDefinition();
}
/// isMsStruct - Get whether or not this record uses ms_struct layout.
/// This can be turned on with an attribute, pragma, or the
/// -mms-bitfields command-line option.
bool RecordDecl::isMsStruct(const ASTContext &C) const {
return hasAttr<MSStructAttr>() || C.getLangOpts().MSBitfields == 1;
}
static bool isFieldOrIndirectField(Decl::Kind K) {
return FieldDecl::classofKind(K) || IndirectFieldDecl::classofKind(K);
}
void RecordDecl::LoadFieldsFromExternalStorage() const {
ExternalASTSource *Source = getASTContext().getExternalSource();
assert(hasExternalLexicalStorage() && Source && "No external storage?");
// Notify that we have a RecordDecl doing some initialization.
ExternalASTSource::Deserializing TheFields(Source);
SmallVector<Decl*, 64> Decls;
LoadedFieldsFromExternalStorage = true;
switch (Source->FindExternalLexicalDecls(this, isFieldOrIndirectField,
Decls)) {
case ELR_Success:
break;
case ELR_AlreadyLoaded:
case ELR_Failure:
return;
}
#ifndef NDEBUG
// Check that all decls we got were FieldDecls.
for (unsigned i=0, e=Decls.size(); i != e; ++i)
assert(isa<FieldDecl>(Decls[i]) || isa<IndirectFieldDecl>(Decls[i]));
#endif
if (Decls.empty())
return;
std::tie(FirstDecl, LastDecl) = BuildDeclChain(Decls,
/*FieldsAlreadyLoaded=*/false);
}
bool RecordDecl::mayInsertExtraPadding(bool EmitRemark) const {
ASTContext &Context = getASTContext();
if (!Context.getLangOpts().Sanitize.hasOneOf(
SanitizerKind::Address | SanitizerKind::KernelAddress) ||
!Context.getLangOpts().SanitizeAddressFieldPadding)
return false;
const auto &Blacklist = Context.getSanitizerBlacklist();
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(this);
// We may be able to relax some of these requirements.
int ReasonToReject = -1;
if (!CXXRD || CXXRD->isExternCContext())
ReasonToReject = 0; // is not C++.
else if (CXXRD->hasAttr<PackedAttr>())
ReasonToReject = 1; // is packed.
else if (CXXRD->isUnion())
ReasonToReject = 2; // is a union.
else if (CXXRD->isTriviallyCopyable())
ReasonToReject = 3; // is trivially copyable.
else if (CXXRD->hasTrivialDestructor())
ReasonToReject = 4; // has trivial destructor.
else if (CXXRD->isStandardLayout())
ReasonToReject = 5; // is standard layout.
else if (Blacklist.isBlacklistedLocation(getLocation(), "field-padding"))
ReasonToReject = 6; // is in a blacklisted file.
else if (Blacklist.isBlacklistedType(getQualifiedNameAsString(),
"field-padding"))
ReasonToReject = 7; // is blacklisted.
if (EmitRemark) {
if (ReasonToReject >= 0)
Context.getDiagnostics().Report(
getLocation(),
diag::remark_sanitize_address_insert_extra_padding_rejected)
<< getQualifiedNameAsString() << ReasonToReject;
else
Context.getDiagnostics().Report(
getLocation(),
diag::remark_sanitize_address_insert_extra_padding_accepted)
<< getQualifiedNameAsString();
}
return ReasonToReject < 0;
}
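// A sketch of a class that would pass every rejection test above (assuming it
// is not blacklisted): it must be C++, unpacked, not a union, and have a
// non-trivial copy, a non-trivial destructor, and non-standard layout, e.g.
//
//   class PaddedCandidate {
//     virtual ~PaddedCandidate();                // non-trivial dtor, and the
//                                                // vtable breaks standard layout
//     PaddedCandidate(const PaddedCandidate &);  // not trivially copyable
//     int a;
//     char b;
//   };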
const FieldDecl *RecordDecl::findFirstNamedDataMember() const {
for (const auto *I : fields()) {
if (I->getIdentifier())
return I;
if (const RecordType *RT = I->getType()->getAs<RecordType>())
if (const FieldDecl *NamedDataMember =
RT->getDecl()->findFirstNamedDataMember())
return NamedDataMember;
}
// We didn't find a named data member.
return nullptr;
}
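// Worked example of the recursion: given
//
//   struct S {
//     union {      // unnamed field whose type is an anonymous union
//       int x;
//       float f;
//     };
//   };
//
// the unnamed union field has no identifier, so the walk descends into its
// RecordType and returns 'x', the first named data member in declaration
// order.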
//===----------------------------------------------------------------------===//
// BlockDecl Implementation
//===----------------------------------------------------------------------===//
void BlockDecl::setParams(ArrayRef<ParmVarDecl *> NewParamInfo) {
assert(!ParamInfo && "Already has param info!");
// Zero params -> null pointer.
if (!NewParamInfo.empty()) {
NumParams = NewParamInfo.size();
ParamInfo = new (getASTContext()) ParmVarDecl*[NewParamInfo.size()];
std::copy(NewParamInfo.begin(), NewParamInfo.end(), ParamInfo);
}
}
void BlockDecl::setCaptures(ASTContext &Context,
const Capture *begin,
const Capture *end,
bool capturesCXXThis) {
CapturesCXXThis = capturesCXXThis;
if (begin == end) {
NumCaptures = 0;
Captures = nullptr;
return;
}
NumCaptures = end - begin;
// Avoid new Capture[] because we don't want to provide a default
// constructor.
size_t allocationSize = NumCaptures * sizeof(Capture);
void *buffer = Context.Allocate(allocationSize, /*alignment*/sizeof(void*));
memcpy(buffer, begin, allocationSize);
Captures = static_cast<Capture*>(buffer);
}
bool BlockDecl::capturesVariable(const VarDecl *variable) const {
for (const auto &I : captures())
// Only auto vars can be captured, so no redeclaration worries.
if (I.getVariable() == variable)
return true;
return false;
}
SourceRange BlockDecl::getSourceRange() const {
  return SourceRange(getLocation(), Body ? Body->getLocEnd() : getLocation());
}
//===----------------------------------------------------------------------===//
// Other Decl Allocation/Deallocation Method Implementations
//===----------------------------------------------------------------------===//
void TranslationUnitDecl::anchor() { }
TranslationUnitDecl *TranslationUnitDecl::Create(ASTContext &C) {
return new (C, (DeclContext *)nullptr) TranslationUnitDecl(C);
}
void ExternCContextDecl::anchor() { }
ExternCContextDecl *ExternCContextDecl::Create(const ASTContext &C,
TranslationUnitDecl *DC) {
return new (C, DC) ExternCContextDecl(DC);
}
void LabelDecl::anchor() { }
LabelDecl *LabelDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation IdentL, IdentifierInfo *II) {
return new (C, DC) LabelDecl(DC, IdentL, II, nullptr, IdentL);
}
LabelDecl *LabelDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation IdentL, IdentifierInfo *II,
SourceLocation GnuLabelL) {
assert(GnuLabelL != IdentL && "Use this only for GNU local labels");
return new (C, DC) LabelDecl(DC, IdentL, II, nullptr, GnuLabelL);
}
LabelDecl *LabelDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) LabelDecl(nullptr, SourceLocation(), nullptr, nullptr,
SourceLocation());
}
void LabelDecl::setMSAsmLabel(StringRef Name) {
char *Buffer = new (getASTContext(), 1) char[Name.size() + 1];
memcpy(Buffer, Name.data(), Name.size());
Buffer[Name.size()] = '\0';
MSAsmName = Buffer;
}
void ValueDecl::anchor() { }
bool ValueDecl::isWeak() const {
for (const auto *I : attrs())
if (isa<WeakAttr>(I) || isa<WeakRefAttr>(I))
return true;
return isWeakImported();
}
void ImplicitParamDecl::anchor() { }
ImplicitParamDecl *ImplicitParamDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation IdLoc,
IdentifierInfo *Id,
QualType Type) {
return new (C, DC) ImplicitParamDecl(C, DC, IdLoc, Id, Type);
}
ImplicitParamDecl *ImplicitParamDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
return new (C, ID) ImplicitParamDecl(C, nullptr, SourceLocation(), nullptr,
QualType());
}
FunctionDecl *FunctionDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc,
const DeclarationNameInfo &NameInfo,
QualType T, TypeSourceInfo *TInfo,
StorageClass SC,
bool isInlineSpecified,
bool hasWrittenPrototype,
bool isConstexprSpecified) {
FunctionDecl *New =
new (C, DC) FunctionDecl(Function, C, DC, StartLoc, NameInfo, T, TInfo,
SC, isInlineSpecified, isConstexprSpecified);
New->HasWrittenPrototype = hasWrittenPrototype;
return New;
}
FunctionDecl *FunctionDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) FunctionDecl(Function, C, nullptr, SourceLocation(),
DeclarationNameInfo(), QualType(), nullptr,
SC_None, false, false);
}
BlockDecl *BlockDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L) {
return new (C, DC) BlockDecl(DC, L);
}
BlockDecl *BlockDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) BlockDecl(nullptr, SourceLocation());
}
CapturedDecl *CapturedDecl::Create(ASTContext &C, DeclContext *DC,
unsigned NumParams) {
return new (C, DC, NumParams * sizeof(ImplicitParamDecl *))
CapturedDecl(DC, NumParams);
}
CapturedDecl *CapturedDecl::CreateDeserialized(ASTContext &C, unsigned ID,
unsigned NumParams) {
return new (C, ID, NumParams * sizeof(ImplicitParamDecl *))
CapturedDecl(nullptr, NumParams);
}
EnumConstantDecl *EnumConstantDecl::Create(ASTContext &C, EnumDecl *CD,
SourceLocation L,
IdentifierInfo *Id, QualType T,
Expr *E, const llvm::APSInt &V) {
return new (C, CD) EnumConstantDecl(CD, L, Id, T, E, V);
}
EnumConstantDecl *
EnumConstantDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) EnumConstantDecl(nullptr, SourceLocation(), nullptr,
QualType(), nullptr, llvm::APSInt());
}
void IndirectFieldDecl::anchor() { }
IndirectFieldDecl *
IndirectFieldDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
IdentifierInfo *Id, QualType T, NamedDecl **CH,
unsigned CHS) {
return new (C, DC) IndirectFieldDecl(DC, L, Id, T, CH, CHS);
}
IndirectFieldDecl *IndirectFieldDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
return new (C, ID) IndirectFieldDecl(nullptr, SourceLocation(),
DeclarationName(), QualType(), nullptr,
0);
}
SourceRange EnumConstantDecl::getSourceRange() const {
SourceLocation End = getLocation();
if (Init)
End = Init->getLocEnd();
return SourceRange(getLocation(), End);
}
void TypeDecl::anchor() { }
TypedefDecl *TypedefDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, TypeSourceInfo *TInfo) {
return new (C, DC) TypedefDecl(C, DC, StartLoc, IdLoc, Id, TInfo);
}
void TypedefNameDecl::anchor() { }
TagDecl *TypedefNameDecl::getAnonDeclWithTypedefName(bool AnyRedecl) const {
if (auto *TT = getTypeSourceInfo()->getType()->getAs<TagType>()) {
auto *OwningTypedef = TT->getDecl()->getTypedefNameForAnonDecl();
auto *ThisTypedef = this;
if (AnyRedecl && OwningTypedef) {
OwningTypedef = OwningTypedef->getCanonicalDecl();
ThisTypedef = ThisTypedef->getCanonicalDecl();
}
if (OwningTypedef == ThisTypedef)
return TT->getDecl();
}
return nullptr;
}
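// For example, in
//
//   typedef struct { int v; } Point;
//
// the anonymous struct's TagDecl records 'Point' as its typedef name, so
// calling this on Point's TypedefDecl returns that struct; a typedef of an
// already-named or unrelated tag returns nullptr.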
TypedefDecl *TypedefDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) TypedefDecl(C, nullptr, SourceLocation(), SourceLocation(),
nullptr, nullptr);
}
TypeAliasDecl *TypeAliasDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
TypeSourceInfo *TInfo) {
return new (C, DC) TypeAliasDecl(C, DC, StartLoc, IdLoc, Id, TInfo);
}
TypeAliasDecl *TypeAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) TypeAliasDecl(C, nullptr, SourceLocation(),
SourceLocation(), nullptr, nullptr);
}
SourceRange TypedefDecl::getSourceRange() const {
SourceLocation RangeEnd = getLocation();
if (TypeSourceInfo *TInfo = getTypeSourceInfo()) {
if (typeIsPostfix(TInfo->getType()))
RangeEnd = TInfo->getTypeLoc().getSourceRange().getEnd();
}
return SourceRange(getLocStart(), RangeEnd);
}
SourceRange TypeAliasDecl::getSourceRange() const {
SourceLocation RangeEnd = getLocStart();
if (TypeSourceInfo *TInfo = getTypeSourceInfo())
RangeEnd = TInfo->getTypeLoc().getSourceRange().getEnd();
return SourceRange(getLocStart(), RangeEnd);
}
void FileScopeAsmDecl::anchor() { }
FileScopeAsmDecl *FileScopeAsmDecl::Create(ASTContext &C, DeclContext *DC,
StringLiteral *Str,
SourceLocation AsmLoc,
SourceLocation RParenLoc) {
return new (C, DC) FileScopeAsmDecl(DC, Str, AsmLoc, RParenLoc);
}
FileScopeAsmDecl *FileScopeAsmDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
return new (C, ID) FileScopeAsmDecl(nullptr, nullptr, SourceLocation(),
SourceLocation());
}
void EmptyDecl::anchor() {}
EmptyDecl *EmptyDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L) {
return new (C, DC) EmptyDecl(DC, L);
}
EmptyDecl *EmptyDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) EmptyDecl(nullptr, SourceLocation());
}
//===----------------------------------------------------------------------===//
// ImportDecl Implementation
//===----------------------------------------------------------------------===//
/// \brief Retrieve the number of module identifiers needed to name the given
/// module.
static unsigned getNumModuleIdentifiers(Module *Mod) {
unsigned Result = 1;
while (Mod->Parent) {
Mod = Mod->Parent;
++Result;
}
return Result;
}
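// E.g. a submodule reached as std.io.file has a two-step Parent chain
// (file -> io -> std), so the walk returns 3, one identifier per component.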
ImportDecl::ImportDecl(DeclContext *DC, SourceLocation StartLoc,
Module *Imported,
ArrayRef<SourceLocation> IdentifierLocs)
: Decl(Import, DC, StartLoc), ImportedAndComplete(Imported, true),
NextLocalImport()
{
assert(getNumModuleIdentifiers(Imported) == IdentifierLocs.size());
SourceLocation *StoredLocs = reinterpret_cast<SourceLocation *>(this + 1);
memcpy(StoredLocs, IdentifierLocs.data(),
IdentifierLocs.size() * sizeof(SourceLocation));
}
ImportDecl::ImportDecl(DeclContext *DC, SourceLocation StartLoc,
Module *Imported, SourceLocation EndLoc)
: Decl(Import, DC, StartLoc), ImportedAndComplete(Imported, false),
NextLocalImport()
{
*reinterpret_cast<SourceLocation *>(this + 1) = EndLoc;
}
ImportDecl *ImportDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, Module *Imported,
ArrayRef<SourceLocation> IdentifierLocs) {
return new (C, DC, IdentifierLocs.size() * sizeof(SourceLocation))
ImportDecl(DC, StartLoc, Imported, IdentifierLocs);
}
ImportDecl *ImportDecl::CreateImplicit(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc,
Module *Imported,
SourceLocation EndLoc) {
ImportDecl *Import =
new (C, DC, sizeof(SourceLocation)) ImportDecl(DC, StartLoc,
Imported, EndLoc);
Import->setImplicit();
return Import;
}
ImportDecl *ImportDecl::CreateDeserialized(ASTContext &C, unsigned ID,
unsigned NumLocations) {
return new (C, ID, NumLocations * sizeof(SourceLocation))
ImportDecl(EmptyShell());
}
ArrayRef<SourceLocation> ImportDecl::getIdentifierLocs() const {
if (!ImportedAndComplete.getInt())
return None;
const SourceLocation *StoredLocs
= reinterpret_cast<const SourceLocation *>(this + 1);
return llvm::makeArrayRef(StoredLocs,
getNumModuleIdentifiers(getImportedModule()));
}
SourceRange ImportDecl::getSourceRange() const {
if (!ImportedAndComplete.getInt())
return SourceRange(getLocation(),
*reinterpret_cast<const SourceLocation *>(this + 1));
return SourceRange(getLocation(), getIdentifierLocs().back());
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/DeclarationName.cpp | //===-- DeclarationName.cpp - Declaration names implementation --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the DeclarationName and DeclarationNameTable
// classes.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/IdentifierTable.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
namespace clang {
/// CXXSpecialName - Records the type associated with one of the
/// "special" kinds of declaration names in C++, e.g., constructors,
/// destructors, and conversion functions.
class CXXSpecialName
: public DeclarationNameExtra, public llvm::FoldingSetNode {
public:
/// Type - The type associated with this declaration name.
QualType Type;
/// FETokenInfo - Extra information associated with this declaration
/// name that can be used by the front end.
void *FETokenInfo;
void Profile(llvm::FoldingSetNodeID &ID) {
ID.AddInteger(ExtraKindOrNumArgs);
ID.AddPointer(Type.getAsOpaquePtr());
}
};
/// CXXOperatorIdName - Contains extra information for the name of an
/// overloaded operator in C++, such as "operator+".
class CXXOperatorIdName : public DeclarationNameExtra {
public:
/// FETokenInfo - Extra information associated with this operator
/// name that can be used by the front end.
void *FETokenInfo;
};
/// CXXLiteralOperatorName - Contains the actual identifier that makes up the
/// name.
///
/// This identifier is stored here rather than directly in DeclarationName so as
/// to allow Objective-C selectors, which are about a million times more common,
/// to consume minimal memory.
class CXXLiteralOperatorIdName
: public DeclarationNameExtra, public llvm::FoldingSetNode {
public:
IdentifierInfo *ID;
/// FETokenInfo - Extra information associated with this operator
/// name that can be used by the front end.
void *FETokenInfo;
void Profile(llvm::FoldingSetNodeID &FSID) {
FSID.AddPointer(ID);
}
};
static int compareInt(unsigned A, unsigned B) {
return (A < B ? -1 : (A > B ? 1 : 0));
}
int DeclarationName::compare(DeclarationName LHS, DeclarationName RHS) {
if (LHS.getNameKind() != RHS.getNameKind())
return (LHS.getNameKind() < RHS.getNameKind() ? -1 : 1);
switch (LHS.getNameKind()) {
case DeclarationName::Identifier: {
IdentifierInfo *LII = LHS.getAsIdentifierInfo();
IdentifierInfo *RII = RHS.getAsIdentifierInfo();
if (!LII) return RII ? -1 : 0;
if (!RII) return 1;
return LII->getName().compare(RII->getName());
}
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCMultiArgSelector: {
Selector LHSSelector = LHS.getObjCSelector();
Selector RHSSelector = RHS.getObjCSelector();
unsigned LN = LHSSelector.getNumArgs(), RN = RHSSelector.getNumArgs();
for (unsigned I = 0, N = std::min(LN, RN); I != N; ++I) {
switch (LHSSelector.getNameForSlot(I).compare(
RHSSelector.getNameForSlot(I))) {
      case -1: return -1;
      case 1: return 1;
default: break;
}
}
return compareInt(LN, RN);
}
case DeclarationName::CXXConstructorName:
case DeclarationName::CXXDestructorName:
case DeclarationName::CXXConversionFunctionName:
if (QualTypeOrdering()(LHS.getCXXNameType(), RHS.getCXXNameType()))
return -1;
if (QualTypeOrdering()(RHS.getCXXNameType(), LHS.getCXXNameType()))
return 1;
return 0;
case DeclarationName::CXXOperatorName:
return compareInt(LHS.getCXXOverloadedOperator(),
RHS.getCXXOverloadedOperator());
case DeclarationName::CXXLiteralOperatorName:
return LHS.getCXXLiteralIdentifier()->getName().compare(
RHS.getCXXLiteralIdentifier()->getName());
case DeclarationName::CXXUsingDirective:
return 0;
}
llvm_unreachable("Invalid DeclarationName Kind!");
}
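// Since this returns a consistent -1/0/1 ordering, it can back a strict-weak
// comparator directly, e.g. (hypothetical helper):
//
//   struct DeclarationNameLess {
//     bool operator()(DeclarationName A, DeclarationName B) const {
//       return DeclarationName::compare(A, B) < 0;
//     }
//   };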
raw_ostream &operator<<(raw_ostream &OS, DeclarationName N) {
switch (N.getNameKind()) {
case DeclarationName::Identifier:
if (const IdentifierInfo *II = N.getAsIdentifierInfo())
OS << II->getName();
return OS;
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCMultiArgSelector:
N.getObjCSelector().print(OS);
return OS;
case DeclarationName::CXXConstructorName: {
QualType ClassType = N.getCXXNameType();
if (const RecordType *ClassRec = ClassType->getAs<RecordType>())
return OS << *ClassRec->getDecl();
LangOptions LO;
#ifdef MS_SUPPORT_VARIABLE_LANGOPTS // HLSL Change
LO.CPlusPlus = true;
#endif
return OS << ClassType.getAsString(PrintingPolicy(LO));
}
case DeclarationName::CXXDestructorName: {
OS << '~';
QualType Type = N.getCXXNameType();
if (const RecordType *Rec = Type->getAs<RecordType>())
return OS << *Rec->getDecl();
LangOptions LO;
#ifdef MS_SUPPORT_VARIABLE_LANGOPTS // HLSL Change
LO.CPlusPlus = true;
#endif
return OS << Type.getAsString(PrintingPolicy(LO));
}
case DeclarationName::CXXOperatorName: {
static const char* const OperatorNames[NUM_OVERLOADED_OPERATORS] = {
nullptr,
#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
Spelling,
#include "clang/Basic/OperatorKinds.def"
};
const char *OpName = OperatorNames[N.getCXXOverloadedOperator()];
assert(OpName && "not an overloaded operator");
OS << "operator";
if (OpName[0] >= 'a' && OpName[0] <= 'z')
OS << ' ';
return OS << OpName;
}
case DeclarationName::CXXLiteralOperatorName:
return OS << "operator \"\" " << N.getCXXLiteralIdentifier()->getName();
case DeclarationName::CXXConversionFunctionName: {
OS << "operator ";
QualType Type = N.getCXXNameType();
if (const RecordType *Rec = Type->getAs<RecordType>())
return OS << *Rec->getDecl();
LangOptions LO;
#ifdef MS_SUPPORT_VARIABLE_LANGOPTS // HLSL Change
LO.CPlusPlus = true;
LO.Bool = true;
#endif
return OS << Type.getAsString(PrintingPolicy(LO));
}
case DeclarationName::CXXUsingDirective:
return OS << "<using-directive>";
}
llvm_unreachable("Unexpected declaration name kind");
}
} // end namespace clang
DeclarationName::NameKind DeclarationName::getNameKind() const {
switch (getStoredNameKind()) {
case StoredIdentifier: return Identifier;
case StoredObjCZeroArgSelector: return ObjCZeroArgSelector;
case StoredObjCOneArgSelector: return ObjCOneArgSelector;
case StoredDeclarationNameExtra:
switch (getExtra()->ExtraKindOrNumArgs) {
case DeclarationNameExtra::CXXConstructor:
return CXXConstructorName;
case DeclarationNameExtra::CXXDestructor:
return CXXDestructorName;
case DeclarationNameExtra::CXXConversionFunction:
return CXXConversionFunctionName;
case DeclarationNameExtra::CXXLiteralOperator:
return CXXLiteralOperatorName;
case DeclarationNameExtra::CXXUsingDirective:
return CXXUsingDirective;
default:
// Check if we have one of the CXXOperator* enumeration values.
if (getExtra()->ExtraKindOrNumArgs <
DeclarationNameExtra::CXXUsingDirective)
return CXXOperatorName;
return ObjCMultiArgSelector;
}
}
// Can't actually get here.
llvm_unreachable("This should be unreachable!");
}
bool DeclarationName::isDependentName() const {
QualType T = getCXXNameType();
return !T.isNull() && T->isDependentType();
}
std::string DeclarationName::getAsString() const {
std::string Result;
llvm::raw_string_ostream OS(Result);
OS << *this;
return OS.str();
}
QualType DeclarationName::getCXXNameType() const {
if (CXXSpecialName *CXXName = getAsCXXSpecialName())
return CXXName->Type;
else
return QualType();
}
OverloadedOperatorKind DeclarationName::getCXXOverloadedOperator() const {
if (CXXOperatorIdName *CXXOp = getAsCXXOperatorIdName()) {
unsigned value
= CXXOp->ExtraKindOrNumArgs - DeclarationNameExtra::CXXConversionFunction;
return static_cast<OverloadedOperatorKind>(value);
} else {
return OO_None;
}
}
IdentifierInfo *DeclarationName::getCXXLiteralIdentifier() const {
if (CXXLiteralOperatorIdName *CXXLit = getAsCXXLiteralOperatorIdName())
return CXXLit->ID;
else
return nullptr;
}
void *DeclarationName::getFETokenInfoAsVoidSlow() const {
switch (getNameKind()) {
case Identifier:
llvm_unreachable("Handled by getFETokenInfo()");
case CXXConstructorName:
case CXXDestructorName:
case CXXConversionFunctionName:
return getAsCXXSpecialName()->FETokenInfo;
case CXXOperatorName:
return getAsCXXOperatorIdName()->FETokenInfo;
case CXXLiteralOperatorName:
return getAsCXXLiteralOperatorIdName()->FETokenInfo;
default:
llvm_unreachable("Declaration name has no FETokenInfo");
}
}
void DeclarationName::setFETokenInfo(void *T) {
switch (getNameKind()) {
case Identifier:
getAsIdentifierInfo()->setFETokenInfo(T);
break;
case CXXConstructorName:
case CXXDestructorName:
case CXXConversionFunctionName:
getAsCXXSpecialName()->FETokenInfo = T;
break;
case CXXOperatorName:
getAsCXXOperatorIdName()->FETokenInfo = T;
break;
case CXXLiteralOperatorName:
getAsCXXLiteralOperatorIdName()->FETokenInfo = T;
break;
default:
llvm_unreachable("Declaration name has no FETokenInfo");
}
}
DeclarationName DeclarationName::getUsingDirectiveName() {
// Single instance of DeclarationNameExtra for using-directive
static const DeclarationNameExtra UDirExtra =
{ DeclarationNameExtra::CXXUsingDirective };
uintptr_t Ptr = reinterpret_cast<uintptr_t>(&UDirExtra);
Ptr |= StoredDeclarationNameExtra;
return DeclarationName(Ptr);
}
void DeclarationName::dump() const {
llvm::errs() << *this << '\n';
}
DeclarationNameTable::DeclarationNameTable(const ASTContext &C) : Ctx(C) {
// HLSL Change Starts - use std::unique_ptr to avoid leaks
  std::unique_ptr<llvm::FoldingSet<CXXSpecialName> > CXXSpecialNamesImplPtr(
      new llvm::FoldingSet<CXXSpecialName>());
  std::unique_ptr<llvm::FoldingSet<CXXLiteralOperatorIdName> >
      CXXLiteralOperatorNamesPtr(
          new llvm::FoldingSet<CXXLiteralOperatorIdName>());
// Initialize the overloaded operator names.
CXXOperatorNames = new (Ctx) CXXOperatorIdName[NUM_OVERLOADED_OPERATORS];
for (unsigned Op = 0; Op < NUM_OVERLOADED_OPERATORS; ++Op) {
CXXOperatorNames[Op].ExtraKindOrNumArgs
= Op + DeclarationNameExtra::CXXConversionFunction;
CXXOperatorNames[Op].FETokenInfo = nullptr;
}
CXXSpecialNamesImpl = CXXSpecialNamesImplPtr.release();
CXXLiteralOperatorNames = CXXLiteralOperatorNamesPtr.release();
// HLSL Change Ends - use std::unique_ptr to avoid leaks
}
DeclarationNameTable::~DeclarationNameTable() {
llvm::FoldingSet<CXXSpecialName> *SpecialNames =
static_cast<llvm::FoldingSet<CXXSpecialName>*>(CXXSpecialNamesImpl);
llvm::FoldingSet<CXXLiteralOperatorIdName> *LiteralNames
= static_cast<llvm::FoldingSet<CXXLiteralOperatorIdName>*>
(CXXLiteralOperatorNames);
delete SpecialNames;
delete LiteralNames;
}
DeclarationName DeclarationNameTable::getCXXConstructorName(CanQualType Ty) {
return getCXXSpecialName(DeclarationName::CXXConstructorName,
Ty.getUnqualifiedType());
}
DeclarationName DeclarationNameTable::getCXXDestructorName(CanQualType Ty) {
return getCXXSpecialName(DeclarationName::CXXDestructorName,
Ty.getUnqualifiedType());
}
DeclarationName
DeclarationNameTable::getCXXConversionFunctionName(CanQualType Ty) {
return getCXXSpecialName(DeclarationName::CXXConversionFunctionName, Ty);
}
DeclarationName
DeclarationNameTable::getCXXSpecialName(DeclarationName::NameKind Kind,
CanQualType Ty) {
assert(Kind >= DeclarationName::CXXConstructorName &&
Kind <= DeclarationName::CXXConversionFunctionName &&
"Kind must be a C++ special name kind");
llvm::FoldingSet<CXXSpecialName> *SpecialNames
= static_cast<llvm::FoldingSet<CXXSpecialName>*>(CXXSpecialNamesImpl);
DeclarationNameExtra::ExtraKind EKind;
switch (Kind) {
case DeclarationName::CXXConstructorName:
EKind = DeclarationNameExtra::CXXConstructor;
    assert(!Ty.hasQualifiers() && "Constructor type must be unqualified");
break;
case DeclarationName::CXXDestructorName:
EKind = DeclarationNameExtra::CXXDestructor;
assert(!Ty.hasQualifiers() && "Destructor type must be unqualified");
break;
case DeclarationName::CXXConversionFunctionName:
EKind = DeclarationNameExtra::CXXConversionFunction;
break;
default:
return DeclarationName();
}
  // Unique this special name via the folding set, to guarantee there is one
  // per (kind, type) pair.
llvm::FoldingSetNodeID ID;
ID.AddInteger(EKind);
ID.AddPointer(Ty.getAsOpaquePtr());
void *InsertPos = nullptr;
if (CXXSpecialName *Name = SpecialNames->FindNodeOrInsertPos(ID, InsertPos))
return DeclarationName(Name);
CXXSpecialName *SpecialName = new (Ctx) CXXSpecialName;
SpecialName->ExtraKindOrNumArgs = EKind;
SpecialName->Type = Ty;
SpecialName->FETokenInfo = nullptr;
SpecialNames->InsertNode(SpecialName, InsertPos);
return DeclarationName(SpecialName);
}
DeclarationName
DeclarationNameTable::getCXXOperatorName(OverloadedOperatorKind Op) {
return DeclarationName(&CXXOperatorNames[(unsigned)Op]);
}
DeclarationName
DeclarationNameTable::getCXXLiteralOperatorName(IdentifierInfo *II) {
llvm::FoldingSet<CXXLiteralOperatorIdName> *LiteralNames
= static_cast<llvm::FoldingSet<CXXLiteralOperatorIdName>*>
(CXXLiteralOperatorNames);
llvm::FoldingSetNodeID ID;
ID.AddPointer(II);
void *InsertPos = nullptr;
if (CXXLiteralOperatorIdName *Name =
LiteralNames->FindNodeOrInsertPos(ID, InsertPos))
    return DeclarationName(Name);
CXXLiteralOperatorIdName *LiteralName = new (Ctx) CXXLiteralOperatorIdName;
LiteralName->ExtraKindOrNumArgs = DeclarationNameExtra::CXXLiteralOperator;
LiteralName->ID = II;
LiteralName->FETokenInfo = nullptr;
LiteralNames->InsertNode(LiteralName, InsertPos);
return DeclarationName(LiteralName);
}
DeclarationNameLoc::DeclarationNameLoc(DeclarationName Name) {
switch (Name.getNameKind()) {
case DeclarationName::Identifier:
break;
case DeclarationName::CXXConstructorName:
case DeclarationName::CXXDestructorName:
case DeclarationName::CXXConversionFunctionName:
NamedType.TInfo = nullptr;
break;
case DeclarationName::CXXOperatorName:
CXXOperatorName.BeginOpNameLoc = SourceLocation().getRawEncoding();
CXXOperatorName.EndOpNameLoc = SourceLocation().getRawEncoding();
break;
case DeclarationName::CXXLiteralOperatorName:
CXXLiteralOperatorName.OpNameLoc = SourceLocation().getRawEncoding();
break;
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCMultiArgSelector:
// FIXME: ?
break;
case DeclarationName::CXXUsingDirective:
break;
}
}
bool DeclarationNameInfo::containsUnexpandedParameterPack() const {
switch (Name.getNameKind()) {
case DeclarationName::Identifier:
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCMultiArgSelector:
case DeclarationName::CXXOperatorName:
case DeclarationName::CXXLiteralOperatorName:
case DeclarationName::CXXUsingDirective:
return false;
case DeclarationName::CXXConstructorName:
case DeclarationName::CXXDestructorName:
case DeclarationName::CXXConversionFunctionName:
if (TypeSourceInfo *TInfo = LocInfo.NamedType.TInfo)
return TInfo->getType()->containsUnexpandedParameterPack();
return Name.getCXXNameType()->containsUnexpandedParameterPack();
}
llvm_unreachable("All name kinds handled.");
}
bool DeclarationNameInfo::isInstantiationDependent() const {
switch (Name.getNameKind()) {
case DeclarationName::Identifier:
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCMultiArgSelector:
case DeclarationName::CXXOperatorName:
case DeclarationName::CXXLiteralOperatorName:
case DeclarationName::CXXUsingDirective:
return false;
case DeclarationName::CXXConstructorName:
case DeclarationName::CXXDestructorName:
case DeclarationName::CXXConversionFunctionName:
if (TypeSourceInfo *TInfo = LocInfo.NamedType.TInfo)
return TInfo->getType()->isInstantiationDependentType();
return Name.getCXXNameType()->isInstantiationDependentType();
}
llvm_unreachable("All name kinds handled.");
}
std::string DeclarationNameInfo::getAsString() const {
std::string Result;
llvm::raw_string_ostream OS(Result);
printName(OS);
return OS.str();
}
void DeclarationNameInfo::printName(raw_ostream &OS) const {
switch (Name.getNameKind()) {
case DeclarationName::Identifier:
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCMultiArgSelector:
case DeclarationName::CXXOperatorName:
case DeclarationName::CXXLiteralOperatorName:
case DeclarationName::CXXUsingDirective:
OS << Name;
return;
case DeclarationName::CXXConstructorName:
case DeclarationName::CXXDestructorName:
case DeclarationName::CXXConversionFunctionName:
if (TypeSourceInfo *TInfo = LocInfo.NamedType.TInfo) {
if (Name.getNameKind() == DeclarationName::CXXDestructorName)
OS << '~';
else if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName)
OS << "operator ";
LangOptions LO;
#ifdef MS_SUPPORT_VARIABLE_LANGOPTS // HLSL Change
LO.CPlusPlus = true;
LO.Bool = true;
#endif
OS << TInfo->getType().getAsString(PrintingPolicy(LO));
} else
OS << Name;
return;
}
llvm_unreachable("Unexpected declaration name kind");
}
SourceLocation DeclarationNameInfo::getEndLoc() const {
switch (Name.getNameKind()) {
case DeclarationName::Identifier:
return NameLoc;
case DeclarationName::CXXOperatorName: {
unsigned raw = LocInfo.CXXOperatorName.EndOpNameLoc;
return SourceLocation::getFromRawEncoding(raw);
}
case DeclarationName::CXXLiteralOperatorName: {
unsigned raw = LocInfo.CXXLiteralOperatorName.OpNameLoc;
return SourceLocation::getFromRawEncoding(raw);
}
case DeclarationName::CXXConstructorName:
case DeclarationName::CXXDestructorName:
case DeclarationName::CXXConversionFunctionName:
if (TypeSourceInfo *TInfo = LocInfo.NamedType.TInfo)
return TInfo->getTypeLoc().getEndLoc();
else
return NameLoc;
// DNInfo work in progress: FIXME.
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCMultiArgSelector:
case DeclarationName::CXXUsingDirective:
return NameLoc;
}
llvm_unreachable("Unexpected declaration name kind");
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/ItaniumCXXABI.cpp | //===------- ItaniumCXXABI.cpp - AST support for the Itanium C++ ABI ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides C++ AST support targeting the Itanium C++ ABI, which is
// documented at:
// http://www.codesourcery.com/public/cxx-abi/abi.html
// http://www.codesourcery.com/public/cxx-abi/abi-eh.html
//
// It also supports the closely-related ARM C++ ABI, documented at:
// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
//
//===----------------------------------------------------------------------===//
#include "CXXABI.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Type.h"
#include "clang/Basic/TargetInfo.h"
using namespace clang;
namespace {
/// According to Itanium C++ ABI 5.1.2:
/// the name of an anonymous union is considered to be
/// the name of the first named data member found by a pre-order,
/// depth-first, declaration-order walk of the data members of
/// the anonymous union.
/// If there is no such data member (i.e., if all of the data members
/// in the union are unnamed), then there is no way for a program to
/// refer to the anonymous union, and there is therefore no need to mangle
/// its name.
///
/// Returns the identifier of the first named data member of the anonymous
/// union behind \p VD, or nullptr if there is none.
static const IdentifierInfo *findAnonymousUnionVarDeclName(const VarDecl& VD) {
const RecordType *RT = VD.getType()->getAs<RecordType>();
assert(RT && "type of VarDecl is expected to be RecordType.");
assert(RT->getDecl()->isUnion() && "RecordType is expected to be a union.");
if (const FieldDecl *FD = RT->getDecl()->findFirstNamedDataMember()) {
return FD->getIdentifier();
}
return nullptr;
}
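// Worked example of the rule quoted above: for a file-scope declaration like
//
//   static union { int Counter; char Bytes[4]; };
//
// the union's VarDecl is unnamed, so its mangling numbering falls back to
// 'Counter', the first named data member found by the pre-order walk.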
/// \brief Keeps track of the mangled names of lambda expressions and block
/// literals within a particular context.
class ItaniumNumberingContext : public MangleNumberingContext {
llvm::DenseMap<const Type *, unsigned> ManglingNumbers;
llvm::DenseMap<const IdentifierInfo *, unsigned> VarManglingNumbers;
llvm::DenseMap<const IdentifierInfo *, unsigned> TagManglingNumbers;
public:
unsigned getManglingNumber(const CXXMethodDecl *CallOperator) override {
const FunctionProtoType *Proto =
CallOperator->getType()->getAs<FunctionProtoType>();
ASTContext &Context = CallOperator->getASTContext();
QualType Key =
Context.getFunctionType(Context.VoidTy, Proto->getParamTypes(),
FunctionProtoType::ExtProtoInfo(), None); // HLSL Change - add param modifiers
Key = Context.getCanonicalType(Key);
return ++ManglingNumbers[Key->castAs<FunctionProtoType>()];
}
unsigned getManglingNumber(const BlockDecl *BD) override {
const Type *Ty = nullptr;
return ++ManglingNumbers[Ty];
}
unsigned getStaticLocalNumber(const VarDecl *VD) override {
return 0;
}
/// Variable decls are numbered by identifier.
unsigned getManglingNumber(const VarDecl *VD, unsigned) override {
const IdentifierInfo *Identifier = VD->getIdentifier();
if (!Identifier) {
// VarDecl without an identifier represents an anonymous union declaration.
Identifier = findAnonymousUnionVarDeclName(*VD);
}
return ++VarManglingNumbers[Identifier];
}
unsigned getManglingNumber(const TagDecl *TD, unsigned) override {
return ++TagManglingNumbers[TD->getIdentifier()];
}
};
class ItaniumCXXABI : public CXXABI {
protected:
ASTContext &Context;
public:
ItaniumCXXABI(ASTContext &Ctx) : Context(Ctx) { }
std::pair<uint64_t, unsigned>
getMemberPointerWidthAndAlign(const MemberPointerType *MPT) const override {
const TargetInfo &Target = Context.getTargetInfo();
TargetInfo::IntType PtrDiff = Target.getPtrDiffType(0);
uint64_t Width = Target.getTypeWidth(PtrDiff);
unsigned Align = Target.getTypeAlign(PtrDiff);
if (MPT->isMemberFunctionPointer())
Width = 2 * Width;
return std::make_pair(Width, Align);
}
CallingConv getDefaultMethodCallConv(bool isVariadic) const override {
const llvm::Triple &T = Context.getTargetInfo().getTriple();
if (!isVariadic && T.isWindowsGNUEnvironment() &&
T.getArch() == llvm::Triple::x86)
return CC_X86ThisCall;
return CC_C;
}
// We cheat and just check that the class has a vtable pointer, and that it's
// only big enough to have a vtable pointer and nothing more (or less).
bool isNearlyEmpty(const CXXRecordDecl *RD) const override {
// Check that the class has a vtable pointer.
if (!RD->isDynamicClass())
return false;
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
CharUnits PointerSize =
Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
return Layout.getNonVirtualSize() == PointerSize;
}
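  // For instance, under this definition
  //
  //   struct NE  { virtual void f(); };        // nearly empty: vptr only
  //   struct Not { virtual void f(); int x; }; // not: payload beyond the vptr
  //
  // only NE's non-virtual size equals a single pointer.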
const CXXConstructorDecl *
getCopyConstructorForExceptionObject(CXXRecordDecl *RD) override {
return nullptr;
}
void addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
CXXConstructorDecl *CD) override {}
void addDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
unsigned ParmIdx, Expr *DAE) override {}
Expr *getDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
unsigned ParmIdx) override {
return nullptr;
}
MangleNumberingContext *createMangleNumberingContext() const override {
return new ItaniumNumberingContext();
}
};
}
CXXABI *clang::CreateItaniumCXXABI(ASTContext &Ctx) {
return new ItaniumCXXABI(Ctx);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/Stmt.cpp | //===--- Stmt.cpp - Statement AST Node Implementation ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Stmt class and statement subclasses.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/Type.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Token.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
static struct StmtClassNameTable {
const char *Name;
unsigned Counter;
unsigned Size;
} StmtClassInfo[Stmt::lastStmtConstant+1];
static StmtClassNameTable &getStmtInfoTableEntry(Stmt::StmtClass E) {
static bool Initialized = false;
if (Initialized)
return StmtClassInfo[E];
  // Initialize the table on the first use.
Initialized = true;
#define ABSTRACT_STMT(STMT)
#define STMT(CLASS, PARENT) \
StmtClassInfo[(unsigned)Stmt::CLASS##Class].Name = #CLASS; \
StmtClassInfo[(unsigned)Stmt::CLASS##Class].Size = sizeof(CLASS);
#include "clang/AST/StmtNodes.inc"
return StmtClassInfo[E];
}
void *Stmt::operator new(size_t bytes, const ASTContext& C,
unsigned alignment) {
return ::operator new(bytes, C, alignment);
}
const char *Stmt::getStmtClassName() const {
return getStmtInfoTableEntry((StmtClass) StmtBits.sClass).Name;
}
void Stmt::PrintStats() {
// Ensure the table is primed.
getStmtInfoTableEntry(Stmt::NullStmtClass);
unsigned sum = 0;
llvm::errs() << "\n*** Stmt/Expr Stats:\n";
for (int i = 0; i != Stmt::lastStmtConstant+1; i++) {
if (StmtClassInfo[i].Name == nullptr) continue;
sum += StmtClassInfo[i].Counter;
}
llvm::errs() << " " << sum << " stmts/exprs total.\n";
sum = 0;
for (int i = 0; i != Stmt::lastStmtConstant+1; i++) {
if (StmtClassInfo[i].Name == nullptr) continue;
if (StmtClassInfo[i].Counter == 0) continue;
llvm::errs() << " " << StmtClassInfo[i].Counter << " "
<< StmtClassInfo[i].Name << ", " << StmtClassInfo[i].Size
<< " each (" << StmtClassInfo[i].Counter*StmtClassInfo[i].Size
<< " bytes)\n";
sum += StmtClassInfo[i].Counter*StmtClassInfo[i].Size;
}
llvm::errs() << "Total bytes = " << sum << "\n";
}
void Stmt::addStmtClass(StmtClass s) {
++getStmtInfoTableEntry(s).Counter;
}
bool Stmt::StatisticsEnabled = false;
void Stmt::EnableStatistics() {
StatisticsEnabled = true;
}
Stmt *Stmt::IgnoreImplicit() {
Stmt *s = this;
if (auto *ewc = dyn_cast<ExprWithCleanups>(s))
s = ewc->getSubExpr();
if (auto *mte = dyn_cast<MaterializeTemporaryExpr>(s))
s = mte->GetTemporaryExpr();
if (auto *bte = dyn_cast<CXXBindTemporaryExpr>(s))
s = bte->getSubExpr();
while (auto *ice = dyn_cast<ImplicitCastExpr>(s))
s = ice->getSubExpr();
return s;
}
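// For example, for an initializer like 'S s = make();' the AST may wrap the
// call as ExprWithCleanups -> MaterializeTemporaryExpr ->
// CXXBindTemporaryExpr -> implicit casts; a caller peels those layers with:
//
//   Stmt *Core = E->IgnoreImplicit();  // E is some Expr*/Stmt* in scope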
/// \brief Skip no-op (attributed, compound) container stmts and skip captured
/// stmt at the top, if \a IgnoreCaptured is true.
Stmt *Stmt::IgnoreContainers(bool IgnoreCaptured) {
Stmt *S = this;
if (IgnoreCaptured)
if (auto CapS = dyn_cast_or_null<CapturedStmt>(S))
S = CapS->getCapturedStmt();
while (true) {
if (auto AS = dyn_cast_or_null<AttributedStmt>(S))
S = AS->getSubStmt();
else if (auto CS = dyn_cast_or_null<CompoundStmt>(S)) {
if (CS->size() != 1)
break;
S = CS->body_back();
} else
break;
}
return S;
}
/// \brief Strip off all label-like statements.
///
/// This will strip off label statements, case statements, attributed
/// statements and default statements recursively.
const Stmt *Stmt::stripLabelLikeStatements() const {
const Stmt *S = this;
while (true) {
if (const LabelStmt *LS = dyn_cast<LabelStmt>(S))
S = LS->getSubStmt();
else if (const SwitchCase *SC = dyn_cast<SwitchCase>(S))
S = SC->getSubStmt();
else if (const AttributedStmt *AS = dyn_cast<AttributedStmt>(S))
S = AS->getSubStmt();
else
return S;
}
}
namespace {
struct good {};
struct bad {};
// These silly little functions have to be static inline to suppress
// unused warnings, and they have to be defined to suppress other
// warnings.
static inline good is_good(good) { return good(); }
typedef Stmt::child_range children_t();
template <class T> good implements_children(children_t T::*) {
return good();
}
LLVM_ATTRIBUTE_UNUSED
static inline bad implements_children(children_t Stmt::*) {
return bad();
}
typedef SourceLocation getLocStart_t() const;
template <class T> good implements_getLocStart(getLocStart_t T::*) {
return good();
}
LLVM_ATTRIBUTE_UNUSED
static inline bad implements_getLocStart(getLocStart_t Stmt::*) {
return bad();
}
typedef SourceLocation getLocEnd_t() const;
template <class T> good implements_getLocEnd(getLocEnd_t T::*) {
return good();
}
LLVM_ATTRIBUTE_UNUSED
static inline bad implements_getLocEnd(getLocEnd_t Stmt::*) {
return bad();
}
#define ASSERT_IMPLEMENTS_children(type) \
(void) is_good(implements_children(&type::children))
#define ASSERT_IMPLEMENTS_getLocStart(type) \
(void) is_good(implements_getLocStart(&type::getLocStart))
#define ASSERT_IMPLEMENTS_getLocEnd(type) \
(void) is_good(implements_getLocEnd(&type::getLocEnd))
}
/// Check whether the various Stmt classes implement their member
/// functions.
LLVM_ATTRIBUTE_UNUSED
static inline void check_implementations() {
#define ABSTRACT_STMT(type)
#define STMT(type, base) \
ASSERT_IMPLEMENTS_children(type); \
ASSERT_IMPLEMENTS_getLocStart(type); \
ASSERT_IMPLEMENTS_getLocEnd(type);
#include "clang/AST/StmtNodes.inc"
}
Stmt::child_range Stmt::children() {
switch (getStmtClass()) {
case Stmt::NoStmtClass: llvm_unreachable("statement without class");
#define ABSTRACT_STMT(type)
#define STMT(type, base) \
case Stmt::type##Class: \
return static_cast<type*>(this)->children();
#include "clang/AST/StmtNodes.inc"
}
llvm_unreachable("unknown statement kind!");
}
// Amusing macro metaprogramming hack: check whether a class provides
// a more specific implementation of getSourceRange.
//
// See also Expr.cpp:getExprLoc().
namespace {
/// This implementation is used when a class provides a custom
/// implementation of getSourceRange.
template <class S, class T>
SourceRange getSourceRangeImpl(const Stmt *stmt,
SourceRange (T::*v)() const) {
return static_cast<const S*>(stmt)->getSourceRange();
}
/// This implementation is used when a class doesn't provide a custom
/// implementation of getSourceRange. Overload resolution should pick it over
/// the implementation above because it's more specialized according to
/// function template partial ordering.
template <class S>
SourceRange getSourceRangeImpl(const Stmt *stmt,
SourceRange (Stmt::*v)() const) {
return SourceRange(static_cast<const S*>(stmt)->getLocStart(),
static_cast<const S*>(stmt)->getLocEnd());
}
}
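// How the dispatch works: if S declares its own getSourceRange,
// '&S::getSourceRange' has type 'SourceRange (S::*)() const', which does not
// convert to a Stmt member pointer, so only the first overload matches. If S
// merely inherits Stmt's, both overloads are viable and partial ordering
// prefers the second, which stitches getLocStart()/getLocEnd() together.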
SourceRange Stmt::getSourceRange() const {
switch (getStmtClass()) {
case Stmt::NoStmtClass: llvm_unreachable("statement without class");
#define ABSTRACT_STMT(type)
#define STMT(type, base) \
case Stmt::type##Class: \
return getSourceRangeImpl<type>(this, &type::getSourceRange);
#include "clang/AST/StmtNodes.inc"
}
llvm_unreachable("unknown statement kind!");
}
SourceLocation Stmt::getLocStart() const {
// llvm::errs() << "getLocStart() for " << getStmtClassName() << "\n";
switch (getStmtClass()) {
case Stmt::NoStmtClass: llvm_unreachable("statement without class");
#define ABSTRACT_STMT(type)
#define STMT(type, base) \
case Stmt::type##Class: \
return static_cast<const type*>(this)->getLocStart();
#include "clang/AST/StmtNodes.inc"
}
llvm_unreachable("unknown statement kind");
}
SourceLocation Stmt::getLocEnd() const {
switch (getStmtClass()) {
case Stmt::NoStmtClass: llvm_unreachable("statement without class");
#define ABSTRACT_STMT(type)
#define STMT(type, base) \
case Stmt::type##Class: \
return static_cast<const type*>(this)->getLocEnd();
#include "clang/AST/StmtNodes.inc"
}
llvm_unreachable("unknown statement kind");
}
CompoundStmt::CompoundStmt(const ASTContext &C, ArrayRef<Stmt*> Stmts,
SourceLocation LB, SourceLocation RB)
: Stmt(CompoundStmtClass), LBraceLoc(LB), RBraceLoc(RB) {
CompoundStmtBits.NumStmts = Stmts.size();
assert(CompoundStmtBits.NumStmts == Stmts.size() &&
"NumStmts doesn't fit in bits of CompoundStmtBits.NumStmts!");
if (Stmts.size() == 0) {
Body = nullptr;
return;
}
Body = new (C) Stmt*[Stmts.size()];
std::copy(Stmts.begin(), Stmts.end(), Body);
}
void CompoundStmt::setStmts(const ASTContext &C, Stmt **Stmts,
unsigned NumStmts) {
if (this->Body)
C.Deallocate(Body);
this->CompoundStmtBits.NumStmts = NumStmts;
Body = new (C) Stmt*[NumStmts];
memcpy(Body, Stmts, sizeof(Stmt *) * NumStmts);
}
const char *LabelStmt::getName() const {
return getDecl()->getIdentifier()->getNameStart();
}
AttributedStmt *AttributedStmt::Create(const ASTContext &C, SourceLocation Loc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt) {
assert(!Attrs.empty() && "Attrs should not be empty");
void *Mem = C.Allocate(sizeof(AttributedStmt) + sizeof(Attr *) * Attrs.size(),
llvm::alignOf<AttributedStmt>());
return new (Mem) AttributedStmt(Loc, Attrs, SubStmt);
}
AttributedStmt *AttributedStmt::CreateEmpty(const ASTContext &C,
unsigned NumAttrs) {
assert(NumAttrs > 0 && "NumAttrs should be greater than zero");
void *Mem = C.Allocate(sizeof(AttributedStmt) + sizeof(Attr *) * NumAttrs,
llvm::alignOf<AttributedStmt>());
return new (Mem) AttributedStmt(EmptyShell(), NumAttrs);
}
std::string AsmStmt::generateAsmString(const ASTContext &C) const {
if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
return gccAsmStmt->generateAsmString(C);
if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this))
return msAsmStmt->generateAsmString(C);
llvm_unreachable("unknown asm statement kind!");
}
StringRef AsmStmt::getOutputConstraint(unsigned i) const {
if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
return gccAsmStmt->getOutputConstraint(i);
if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this))
return msAsmStmt->getOutputConstraint(i);
llvm_unreachable("unknown asm statement kind!");
}
const Expr *AsmStmt::getOutputExpr(unsigned i) const {
if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
return gccAsmStmt->getOutputExpr(i);
if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this))
return msAsmStmt->getOutputExpr(i);
llvm_unreachable("unknown asm statement kind!");
}
StringRef AsmStmt::getInputConstraint(unsigned i) const {
if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
return gccAsmStmt->getInputConstraint(i);
if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this))
return msAsmStmt->getInputConstraint(i);
llvm_unreachable("unknown asm statement kind!");
}
const Expr *AsmStmt::getInputExpr(unsigned i) const {
if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
return gccAsmStmt->getInputExpr(i);
if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this))
return msAsmStmt->getInputExpr(i);
llvm_unreachable("unknown asm statement kind!");
}
StringRef AsmStmt::getClobber(unsigned i) const {
if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
return gccAsmStmt->getClobber(i);
if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this))
return msAsmStmt->getClobber(i);
llvm_unreachable("unknown asm statement kind!");
}
/// getNumPlusOperands - Return the number of output operands that have a "+"
/// constraint.
unsigned AsmStmt::getNumPlusOperands() const {
unsigned Res = 0;
for (unsigned i = 0, e = getNumOutputs(); i != e; ++i)
if (isOutputPlusConstraint(i))
++Res;
return Res;
}
char GCCAsmStmt::AsmStringPiece::getModifier() const {
assert(isOperand() && "Only Operands can have modifiers.");
return isLetter(Str[0]) ? Str[0] : '\0';
}
StringRef GCCAsmStmt::getClobber(unsigned i) const {
return getClobberStringLiteral(i)->getString();
}
Expr *GCCAsmStmt::getOutputExpr(unsigned i) {
return cast<Expr>(Exprs[i]);
}
/// getOutputConstraint - Return the constraint string for the specified
/// output operand. All output constraints are known to be non-empty (either
/// '=' or '+').
StringRef GCCAsmStmt::getOutputConstraint(unsigned i) const {
return getOutputConstraintLiteral(i)->getString();
}
Expr *GCCAsmStmt::getInputExpr(unsigned i) {
return cast<Expr>(Exprs[i + NumOutputs]);
}
void GCCAsmStmt::setInputExpr(unsigned i, Expr *E) {
Exprs[i + NumOutputs] = E;
}
/// getInputConstraint - Return the specified input constraint. Unlike output
/// constraints, these can be empty.
StringRef GCCAsmStmt::getInputConstraint(unsigned i) const {
return getInputConstraintLiteral(i)->getString();
}
void GCCAsmStmt::setOutputsAndInputsAndClobbers(const ASTContext &C,
IdentifierInfo **Names,
StringLiteral **Constraints,
Stmt **Exprs,
unsigned NumOutputs,
unsigned NumInputs,
StringLiteral **Clobbers,
unsigned NumClobbers) {
this->NumOutputs = NumOutputs;
this->NumInputs = NumInputs;
this->NumClobbers = NumClobbers;
unsigned NumExprs = NumOutputs + NumInputs;
C.Deallocate(this->Names);
this->Names = new (C) IdentifierInfo*[NumExprs];
std::copy(Names, Names + NumExprs, this->Names);
C.Deallocate(this->Exprs);
this->Exprs = new (C) Stmt*[NumExprs];
std::copy(Exprs, Exprs + NumExprs, this->Exprs);
C.Deallocate(this->Constraints);
this->Constraints = new (C) StringLiteral*[NumExprs];
std::copy(Constraints, Constraints + NumExprs, this->Constraints);
C.Deallocate(this->Clobbers);
this->Clobbers = new (C) StringLiteral*[NumClobbers];
std::copy(Clobbers, Clobbers + NumClobbers, this->Clobbers);
}
/// getNamedOperand - Given a symbolic operand reference like %[foo],
/// translate this into a numeric value needed to reference the same operand.
/// This returns -1 if the operand name is invalid.
int GCCAsmStmt::getNamedOperand(StringRef SymbolicName) const {
  // Check if this is an output operand.
  for (unsigned i = 0, e = getNumOutputs(); i != e; ++i) {
    if (getOutputName(i) == SymbolicName)
      return i;
  }
  // Inputs are numbered after all of the outputs.
  for (unsigned i = 0, e = getNumInputs(); i != e; ++i)
    if (getInputName(i) == SymbolicName)
      return getNumOutputs() + i;
  // Not found.
  return -1;
}
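// Worked example: for
//
//   asm("mov %[dst], %[src]" : [dst] "=r"(d) : [src] "r"(s));
//
// getNamedOperand("dst") returns 0 (an output slot) and
// getNamedOperand("src") returns 1, i.e. getNumOutputs() + its input index.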
/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
/// it into pieces. If the asm string is erroneous, return the diagnostic ID
/// to report (with \p DiagOffs set to the offset of the error); otherwise
/// return 0.
unsigned GCCAsmStmt::AnalyzeAsmString(SmallVectorImpl<AsmStringPiece>&Pieces,
const ASTContext &C, unsigned &DiagOffs) const {
StringRef Str = getAsmString()->getString();
const char *StrStart = Str.begin();
const char *StrEnd = Str.end();
const char *CurPtr = StrStart;
// "Simple" inline asms have no constraints or operands, just convert the asm
// string to escape $'s.
if (isSimple()) {
std::string Result;
for (; CurPtr != StrEnd; ++CurPtr) {
switch (*CurPtr) {
case '$':
Result += "$$";
break;
default:
Result += *CurPtr;
break;
}
}
Pieces.push_back(AsmStringPiece(Result));
return 0;
}
// CurStringPiece - The current string that we are building up as we scan the
// asm string.
std::string CurStringPiece;
bool HasVariants = !C.getTargetInfo().hasNoAsmVariants();
while (1) {
// Done with the string?
if (CurPtr == StrEnd) {
if (!CurStringPiece.empty())
Pieces.push_back(AsmStringPiece(CurStringPiece));
return 0;
}
char CurChar = *CurPtr++;
switch (CurChar) {
case '$': CurStringPiece += "$$"; continue;
case '{': CurStringPiece += (HasVariants ? "$(" : "{"); continue;
case '|': CurStringPiece += (HasVariants ? "$|" : "|"); continue;
case '}': CurStringPiece += (HasVariants ? "$)" : "}"); continue;
case '%':
break;
default:
CurStringPiece += CurChar;
continue;
}
// Escaped "%" character in asm string.
if (CurPtr == StrEnd) {
// % at end of string is invalid (no escape).
DiagOffs = CurPtr-StrStart-1;
return diag::err_asm_invalid_escape;
}
char EscapedChar = *CurPtr++;
if (EscapedChar == '%') { // %% -> %
// Escaped percentage sign.
CurStringPiece += '%';
continue;
}
    if (EscapedChar == '=') { // %= -> Generate a unique ID.
CurStringPiece += "${:uid}";
continue;
}
// Otherwise, we have an operand. If we have accumulated a string so far,
// add it to the Pieces list.
if (!CurStringPiece.empty()) {
Pieces.push_back(AsmStringPiece(CurStringPiece));
CurStringPiece.clear();
}
// Handle operands that have asmSymbolicName (e.g., %x[foo]) and those that
// don't (e.g., %x4). 'x' following the '%' is the constraint modifier.
const char *Begin = CurPtr - 1; // Points to the character following '%'.
const char *Percent = Begin - 1; // Points to '%'.
if (isLetter(EscapedChar)) {
if (CurPtr == StrEnd) { // Premature end.
DiagOffs = CurPtr-StrStart-1;
return diag::err_asm_invalid_escape;
}
EscapedChar = *CurPtr++;
}
const TargetInfo &TI = C.getTargetInfo();
const SourceManager &SM = C.getSourceManager();
const LangOptions &LO = C.getLangOpts();
// Handle operands that don't have asmSymbolicName (e.g., %x4).
if (isDigit(EscapedChar)) {
// %n - Assembler operand n
unsigned N = 0;
--CurPtr;
while (CurPtr != StrEnd && isDigit(*CurPtr))
N = N*10 + ((*CurPtr++)-'0');
unsigned NumOperands =
getNumOutputs() + getNumPlusOperands() + getNumInputs();
if (N >= NumOperands) {
DiagOffs = CurPtr-StrStart-1;
return diag::err_asm_invalid_operand_number;
}
// Str contains "x4" (Operand without the leading %).
std::string Str(Begin, CurPtr - Begin);
// (BeginLoc, EndLoc) represents the range of the operand we are currently
// processing. Unlike Str, the range includes the leading '%'.
SourceLocation BeginLoc =
getAsmString()->getLocationOfByte(Percent - StrStart, SM, LO, TI);
SourceLocation EndLoc =
getAsmString()->getLocationOfByte(CurPtr - StrStart, SM, LO, TI);
Pieces.emplace_back(N, std::move(Str), BeginLoc, EndLoc);
continue;
}
// Handle operands that have asmSymbolicName (e.g., %x[foo]).
if (EscapedChar == '[') {
DiagOffs = CurPtr-StrStart-1;
// Find the ']'.
const char *NameEnd = (const char*)memchr(CurPtr, ']', StrEnd-CurPtr);
if (NameEnd == nullptr)
return diag::err_asm_unterminated_symbolic_operand_name;
if (NameEnd == CurPtr)
return diag::err_asm_empty_symbolic_operand_name;
StringRef SymbolicName(CurPtr, NameEnd - CurPtr);
int N = getNamedOperand(SymbolicName);
if (N == -1) {
// Verify that an operand with that name exists.
DiagOffs = CurPtr-StrStart;
return diag::err_asm_unknown_symbolic_operand_name;
}
// Str contains "x[foo]" (Operand without the leading %).
std::string Str(Begin, NameEnd + 1 - Begin);
// (BeginLoc, EndLoc) represents the range of the operand we are currently
// processing. Unlike Str, the range includes the leading '%'.
SourceLocation BeginLoc =
getAsmString()->getLocationOfByte(Percent - StrStart, SM, LO, TI);
SourceLocation EndLoc =
getAsmString()->getLocationOfByte(NameEnd + 1 - StrStart, SM, LO, TI);
Pieces.emplace_back(N, std::move(Str), BeginLoc, EndLoc);
CurPtr = NameEnd+1;
continue;
}
DiagOffs = CurPtr-StrStart-1;
return diag::err_asm_invalid_escape;
}
}
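// Escape summary for the scan above: "%%" emits a literal '%', "%=" expands
// to a unique ID, "%<digits>" (optionally preceded by a modifier letter, as
// in "%x4") references a numbered operand, and "%[name]" references a
// symbolic operand; a lone trailing '%' is diagnosed as an invalid escape.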
/// Assemble final IR asm string (GCC-style).
std::string GCCAsmStmt::generateAsmString(const ASTContext &C) const {
// Analyze the asm string to decompose it into its pieces. We know that Sema
// has already done this, so it is guaranteed to be successful.
SmallVector<GCCAsmStmt::AsmStringPiece, 4> Pieces;
unsigned DiagOffs;
AnalyzeAsmString(Pieces, C, DiagOffs);
std::string AsmString;
for (unsigned i = 0, e = Pieces.size(); i != e; ++i) {
if (Pieces[i].isString())
AsmString += Pieces[i].getString();
else if (Pieces[i].getModifier() == '\0')
AsmString += '$' + llvm::utostr(Pieces[i].getOperandNo());
else
AsmString += "${" + llvm::utostr(Pieces[i].getOperandNo()) + ':' +
Pieces[i].getModifier() + '}';
}
return AsmString;
}
/// Assemble final IR asm string (MS-style).
std::string MSAsmStmt::generateAsmString(const ASTContext &C) const {
// FIXME: This needs to be translated into the IR string representation.
return AsmStr;
}
Expr *MSAsmStmt::getOutputExpr(unsigned i) {
return cast<Expr>(Exprs[i]);
}
Expr *MSAsmStmt::getInputExpr(unsigned i) {
return cast<Expr>(Exprs[i + NumOutputs]);
}
void MSAsmStmt::setInputExpr(unsigned i, Expr *E) {
Exprs[i + NumOutputs] = E;
}
QualType CXXCatchStmt::getCaughtType() const {
if (ExceptionDecl)
return ExceptionDecl->getType();
return QualType();
}
//===----------------------------------------------------------------------===//
// Constructors
//===----------------------------------------------------------------------===//
GCCAsmStmt::GCCAsmStmt(const ASTContext &C, SourceLocation asmloc,
bool issimple, bool isvolatile, unsigned numoutputs,
unsigned numinputs, IdentifierInfo **names,
StringLiteral **constraints, Expr **exprs,
StringLiteral *asmstr, unsigned numclobbers,
StringLiteral **clobbers, SourceLocation rparenloc)
: AsmStmt(GCCAsmStmtClass, asmloc, issimple, isvolatile, numoutputs,
numinputs, numclobbers), RParenLoc(rparenloc), AsmStr(asmstr) {
unsigned NumExprs = NumOutputs + NumInputs;
Names = new (C) IdentifierInfo*[NumExprs];
std::copy(names, names + NumExprs, Names);
Exprs = new (C) Stmt*[NumExprs];
std::copy(exprs, exprs + NumExprs, Exprs);
Constraints = new (C) StringLiteral*[NumExprs];
std::copy(constraints, constraints + NumExprs, Constraints);
Clobbers = new (C) StringLiteral*[NumClobbers];
std::copy(clobbers, clobbers + NumClobbers, Clobbers);
}
MSAsmStmt::MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
SourceLocation lbraceloc, bool issimple, bool isvolatile,
ArrayRef<Token> asmtoks, unsigned numoutputs,
unsigned numinputs,
ArrayRef<StringRef> constraints, ArrayRef<Expr*> exprs,
StringRef asmstr, ArrayRef<StringRef> clobbers,
SourceLocation endloc)
: AsmStmt(MSAsmStmtClass, asmloc, issimple, isvolatile, numoutputs,
numinputs, clobbers.size()), LBraceLoc(lbraceloc),
EndLoc(endloc), NumAsmToks(asmtoks.size()) {
initialize(C, asmstr, asmtoks, constraints, exprs, clobbers);
}
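// Copy a string into ASTContext-owned memory. The incoming StringRef may
// point at transient storage, while the AST node must outlive it; allocating
// through the context also means the copy is freed with the AST rather than
// individually.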
static StringRef copyIntoContext(const ASTContext &C, StringRef str) {
if (str.empty())
return StringRef();
size_t size = str.size();
char *buffer = new (C) char[size];
memcpy(buffer, str.data(), size);
return StringRef(buffer, size);
}
void MSAsmStmt::initialize(const ASTContext &C, StringRef asmstr,
ArrayRef<Token> asmtoks,
ArrayRef<StringRef> constraints,
ArrayRef<Expr*> exprs,
ArrayRef<StringRef> clobbers) {
assert(NumAsmToks == asmtoks.size());
assert(NumClobbers == clobbers.size());
unsigned NumExprs = exprs.size();
assert(NumExprs == NumOutputs + NumInputs);
assert(NumExprs == constraints.size());
AsmStr = copyIntoContext(C, asmstr);
Exprs = new (C) Stmt*[NumExprs];
for (unsigned i = 0, e = NumExprs; i != e; ++i)
Exprs[i] = exprs[i];
AsmToks = new (C) Token[NumAsmToks];
for (unsigned i = 0, e = NumAsmToks; i != e; ++i)
AsmToks[i] = asmtoks[i];
Constraints = new (C) StringRef[NumExprs];
for (unsigned i = 0, e = NumExprs; i != e; ++i) {
Constraints[i] = copyIntoContext(C, constraints[i]);
}
Clobbers = new (C) StringRef[NumClobbers];
for (unsigned i = 0, e = NumClobbers; i != e; ++i) {
// FIXME: Avoid the allocation/copy if at all possible.
Clobbers[i] = copyIntoContext(C, clobbers[i]);
}
}
ObjCForCollectionStmt::ObjCForCollectionStmt(Stmt *Elem, Expr *Collect,
Stmt *Body, SourceLocation FCL,
SourceLocation RPL)
: Stmt(ObjCForCollectionStmtClass) {
SubExprs[ELEM] = Elem;
SubExprs[COLLECTION] = Collect;
SubExprs[BODY] = Body;
ForLoc = FCL;
RParenLoc = RPL;
}
ObjCAtTryStmt::ObjCAtTryStmt(SourceLocation atTryLoc, Stmt *atTryStmt,
Stmt **CatchStmts, unsigned NumCatchStmts,
Stmt *atFinallyStmt)
: Stmt(ObjCAtTryStmtClass), AtTryLoc(atTryLoc),
NumCatchStmts(NumCatchStmts), HasFinally(atFinallyStmt != nullptr) {
Stmt **Stmts = getStmts();
Stmts[0] = atTryStmt;
for (unsigned I = 0; I != NumCatchStmts; ++I)
Stmts[I + 1] = CatchStmts[I];
if (HasFinally)
Stmts[NumCatchStmts + 1] = atFinallyStmt;
}
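// Trailing storage layout for ObjCAtTryStmt is [@try body][catch stmts...]
// [optional @finally], i.e. 1 + NumCatchStmts + (HasFinally ? 1 : 0) Stmt*
// slots; the size computations below mirror this.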
ObjCAtTryStmt *ObjCAtTryStmt::Create(const ASTContext &Context,
SourceLocation atTryLoc,
Stmt *atTryStmt,
Stmt **CatchStmts,
unsigned NumCatchStmts,
Stmt *atFinallyStmt) {
unsigned Size = sizeof(ObjCAtTryStmt) +
(1 + NumCatchStmts + (atFinallyStmt != nullptr)) * sizeof(Stmt *);
void *Mem = Context.Allocate(Size, llvm::alignOf<ObjCAtTryStmt>());
return new (Mem) ObjCAtTryStmt(atTryLoc, atTryStmt, CatchStmts, NumCatchStmts,
atFinallyStmt);
}
ObjCAtTryStmt *ObjCAtTryStmt::CreateEmpty(const ASTContext &Context,
unsigned NumCatchStmts,
bool HasFinally) {
unsigned Size = sizeof(ObjCAtTryStmt) +
(1 + NumCatchStmts + HasFinally) * sizeof(Stmt *);
void *Mem = Context.Allocate(Size, llvm::alignOf<ObjCAtTryStmt>());
return new (Mem) ObjCAtTryStmt(EmptyShell(), NumCatchStmts, HasFinally);
}
SourceLocation ObjCAtTryStmt::getLocEnd() const {
if (HasFinally)
return getFinallyStmt()->getLocEnd();
if (NumCatchStmts)
return getCatchStmt(NumCatchStmts - 1)->getLocEnd();
return getTryBody()->getLocEnd();
}
CXXTryStmt *CXXTryStmt::Create(const ASTContext &C, SourceLocation tryLoc,
Stmt *tryBlock, ArrayRef<Stmt*> handlers) {
std::size_t Size = sizeof(CXXTryStmt);
// Trailing storage holds the try block plus each handler as a Stmt*, so size
// by sizeof(Stmt *); sizeof(Stmt) would over-allocate.
Size += ((handlers.size() + 1) * sizeof(Stmt *));
void *Mem = C.Allocate(Size, llvm::alignOf<CXXTryStmt>());
return new (Mem) CXXTryStmt(tryLoc, tryBlock, handlers);
}
CXXTryStmt *CXXTryStmt::Create(const ASTContext &C, EmptyShell Empty,
unsigned numHandlers) {
std::size_t Size = sizeof(CXXTryStmt);
Size += ((numHandlers + 1) * sizeof(Stmt *));
void *Mem = C.Allocate(Size, llvm::alignOf<CXXTryStmt>());
return new (Mem) CXXTryStmt(Empty, numHandlers);
}
CXXTryStmt::CXXTryStmt(SourceLocation tryLoc, Stmt *tryBlock,
ArrayRef<Stmt*> handlers)
: Stmt(CXXTryStmtClass), TryLoc(tryLoc), NumHandlers(handlers.size()) {
Stmt **Stmts = reinterpret_cast<Stmt **>(this + 1);
Stmts[0] = tryBlock;
std::copy(handlers.begin(), handlers.end(), Stmts + 1);
}
CXXForRangeStmt::CXXForRangeStmt(DeclStmt *Range, DeclStmt *BeginEndStmt,
Expr *Cond, Expr *Inc, DeclStmt *LoopVar,
Stmt *Body, SourceLocation FL,
SourceLocation CL, SourceLocation RPL)
: Stmt(CXXForRangeStmtClass), ForLoc(FL), ColonLoc(CL), RParenLoc(RPL) {
SubExprs[RANGE] = Range;
SubExprs[BEGINEND] = BeginEndStmt;
SubExprs[COND] = Cond;
SubExprs[INC] = Inc;
SubExprs[LOOPVAR] = LoopVar;
SubExprs[BODY] = Body;
}
Expr *CXXForRangeStmt::getRangeInit() {
DeclStmt *RangeStmt = getRangeStmt();
VarDecl *RangeDecl = dyn_cast_or_null<VarDecl>(RangeStmt->getSingleDecl());
assert(RangeDecl && "for-range should have a single var decl");
return RangeDecl->getInit();
}
const Expr *CXXForRangeStmt::getRangeInit() const {
return const_cast<CXXForRangeStmt*>(this)->getRangeInit();
}
VarDecl *CXXForRangeStmt::getLoopVariable() {
Decl *LV = cast<DeclStmt>(getLoopVarStmt())->getSingleDecl();
assert(LV && "No loop variable in CXXForRangeStmt");
return cast<VarDecl>(LV);
}
const VarDecl *CXXForRangeStmt::getLoopVariable() const {
return const_cast<CXXForRangeStmt*>(this)->getLoopVariable();
}
IfStmt::IfStmt(const ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond,
Stmt *then, SourceLocation EL, Stmt *elsev)
: Stmt(IfStmtClass), IfLoc(IL), ElseLoc(EL), MergeLoc(SourceLocation()) {
setConditionVariable(C, var);
SubExprs[COND] = cond;
SubExprs[THEN] = then;
SubExprs[ELSE] = elsev;
}
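// A condition variable (e.g. "if (int x = f())") is wrapped in an implicit
// DeclStmt stored in SubExprs[VAR]. ForStmt, SwitchStmt and WhileStmt below
// use the same pattern.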
VarDecl *IfStmt::getConditionVariable() const {
if (!SubExprs[VAR])
return nullptr;
DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]);
return cast<VarDecl>(DS->getSingleDecl());
}
void IfStmt::setConditionVariable(const ASTContext &C, VarDecl *V) {
if (!V) {
SubExprs[VAR] = nullptr;
return;
}
SourceRange VarRange = V->getSourceRange();
SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
VarRange.getEnd());
}
ForStmt::ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
SourceLocation RP)
: Stmt(ForStmtClass), ForLoc(FL), LParenLoc(LP), RParenLoc(RP)
{
SubExprs[INIT] = Init;
setConditionVariable(C, condVar);
SubExprs[COND] = Cond;
SubExprs[INC] = Inc;
SubExprs[BODY] = Body;
}
VarDecl *ForStmt::getConditionVariable() const {
if (!SubExprs[CONDVAR])
return nullptr;
DeclStmt *DS = cast<DeclStmt>(SubExprs[CONDVAR]);
return cast<VarDecl>(DS->getSingleDecl());
}
void ForStmt::setConditionVariable(const ASTContext &C, VarDecl *V) {
if (!V) {
SubExprs[CONDVAR] = nullptr;
return;
}
SourceRange VarRange = V->getSourceRange();
SubExprs[CONDVAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
VarRange.getEnd());
}
SwitchStmt::SwitchStmt(const ASTContext &C, VarDecl *Var, Expr *cond)
: Stmt(SwitchStmtClass), FirstCase(nullptr, false) {
setConditionVariable(C, Var);
SubExprs[COND] = cond;
SubExprs[BODY] = nullptr;
}
VarDecl *SwitchStmt::getConditionVariable() const {
if (!SubExprs[VAR])
return nullptr;
DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]);
return cast<VarDecl>(DS->getSingleDecl());
}
void SwitchStmt::setConditionVariable(const ASTContext &C, VarDecl *V) {
if (!V) {
SubExprs[VAR] = nullptr;
return;
}
SourceRange VarRange = V->getSourceRange();
SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
VarRange.getEnd());
}
Stmt *SwitchCase::getSubStmt() {
if (isa<CaseStmt>(this))
return cast<CaseStmt>(this)->getSubStmt();
return cast<DefaultStmt>(this)->getSubStmt();
}
WhileStmt::WhileStmt(const ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
SourceLocation WL)
: Stmt(WhileStmtClass) {
setConditionVariable(C, Var);
SubExprs[COND] = cond;
SubExprs[BODY] = body;
WhileLoc = WL;
}
VarDecl *WhileStmt::getConditionVariable() const {
if (!SubExprs[VAR])
return nullptr;
DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]);
return cast<VarDecl>(DS->getSingleDecl());
}
void WhileStmt::setConditionVariable(const ASTContext &C, VarDecl *V) {
if (!V) {
SubExprs[VAR] = nullptr;
return;
}
SourceRange VarRange = V->getSourceRange();
SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
VarRange.getEnd());
}
// IndirectGotoStmt
LabelDecl *IndirectGotoStmt::getConstantTarget() {
if (AddrLabelExpr *E =
dyn_cast<AddrLabelExpr>(getTarget()->IgnoreParenImpCasts()))
return E->getLabel();
return nullptr;
}
// ReturnStmt
const Expr* ReturnStmt::getRetValue() const {
return cast_or_null<Expr>(RetExpr);
}
Expr* ReturnStmt::getRetValue() {
return cast_or_null<Expr>(RetExpr);
}
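// SEH statements model Microsoft structured exception handling: __try with
// either an __except(filter) or a __finally handler. A SEHTryStmt has exactly
// one handler, which is either a SEHExceptStmt or a SEHFinallyStmt.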
SEHTryStmt::SEHTryStmt(bool IsCXXTry,
SourceLocation TryLoc,
Stmt *TryBlock,
Stmt *Handler)
: Stmt(SEHTryStmtClass),
IsCXXTry(IsCXXTry),
TryLoc(TryLoc)
{
Children[TRY] = TryBlock;
Children[HANDLER] = Handler;
}
SEHTryStmt* SEHTryStmt::Create(const ASTContext &C, bool IsCXXTry,
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler) {
return new(C) SEHTryStmt(IsCXXTry,TryLoc,TryBlock,Handler);
}
SEHExceptStmt* SEHTryStmt::getExceptHandler() const {
return dyn_cast<SEHExceptStmt>(getHandler());
}
SEHFinallyStmt* SEHTryStmt::getFinallyHandler() const {
return dyn_cast<SEHFinallyStmt>(getHandler());
}
SEHExceptStmt::SEHExceptStmt(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block)
: Stmt(SEHExceptStmtClass),
Loc(Loc)
{
Children[FILTER_EXPR] = FilterExpr;
Children[BLOCK] = Block;
}
SEHExceptStmt* SEHExceptStmt::Create(const ASTContext &C, SourceLocation Loc,
Expr *FilterExpr, Stmt *Block) {
return new(C) SEHExceptStmt(Loc,FilterExpr,Block);
}
SEHFinallyStmt::SEHFinallyStmt(SourceLocation Loc,
Stmt *Block)
: Stmt(SEHFinallyStmtClass),
Loc(Loc),
Block(Block)
{}
SEHFinallyStmt* SEHFinallyStmt::Create(const ASTContext &C, SourceLocation Loc,
Stmt *Block) {
return new(C)SEHFinallyStmt(Loc,Block);
}
CapturedStmt::Capture *CapturedStmt::getStoredCaptures() const {
unsigned Size = sizeof(CapturedStmt) + sizeof(Stmt *) * (NumCaptures + 1);
// Offset of the first Capture object.
unsigned FirstCaptureOffset =
llvm::RoundUpToAlignment(Size, llvm::alignOf<Capture>());
return reinterpret_cast<Capture *>(
reinterpret_cast<char *>(const_cast<CapturedStmt *>(this))
+ FirstCaptureOffset);
}
CapturedStmt::CapturedStmt(Stmt *S, CapturedRegionKind Kind,
ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits,
CapturedDecl *CD,
RecordDecl *RD)
: Stmt(CapturedStmtClass), NumCaptures(Captures.size()),
CapDeclAndKind(CD, Kind), TheRecordDecl(RD) {
assert(S && "null captured statement");
assert(CD && "null captured declaration for captured statement");
assert(RD && "null record declaration for captured statement");
// Copy initialization expressions.
Stmt **Stored = getStoredStmts();
for (unsigned I = 0, N = NumCaptures; I != N; ++I)
*Stored++ = CaptureInits[I];
// Copy the statement being captured.
*Stored = S;
// Copy all Capture objects.
Capture *Buffer = getStoredCaptures();
std::copy(Captures.begin(), Captures.end(), Buffer);
}
CapturedStmt::CapturedStmt(EmptyShell Empty, unsigned NumCaptures)
: Stmt(CapturedStmtClass, Empty), NumCaptures(NumCaptures),
CapDeclAndKind(nullptr, CR_Default), TheRecordDecl(nullptr) {
getStoredStmts()[NumCaptures] = nullptr;
}
CapturedStmt *CapturedStmt::Create(const ASTContext &Context, Stmt *S,
CapturedRegionKind Kind,
ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits,
CapturedDecl *CD,
RecordDecl *RD) {
// The layout is
//
// -----------------------------------------------------------
// | CapturedStmt, Init, ..., Init, S, Capture, ..., Capture |
// ----------------^-------------------^----------------------
// getStoredStmts() getStoredCaptures()
//
// where S is the statement being captured.
//
assert(CaptureInits.size() == Captures.size() && "wrong number of arguments");
unsigned Size = sizeof(CapturedStmt) + sizeof(Stmt *) * (Captures.size() + 1);
if (!Captures.empty()) {
// Realign for the following Capture array.
Size = llvm::RoundUpToAlignment(Size, llvm::alignOf<Capture>());
Size += sizeof(Capture) * Captures.size();
}
void *Mem = Context.Allocate(Size);
return new (Mem) CapturedStmt(S, Kind, Captures, CaptureInits, CD, RD);
}
CapturedStmt *CapturedStmt::CreateDeserialized(const ASTContext &Context,
unsigned NumCaptures) {
unsigned Size = sizeof(CapturedStmt) + sizeof(Stmt *) * (NumCaptures + 1);
if (NumCaptures > 0) {
// Realign for the following Capture array.
Size = llvm::RoundUpToAlignment(Size, llvm::alignOf<Capture>());
Size += sizeof(Capture) * NumCaptures;
}
void *Mem = Context.Allocate(Size);
return new (Mem) CapturedStmt(EmptyShell(), NumCaptures);
}
Stmt::child_range CapturedStmt::children() {
// Children are the capture initializers; the captured statement itself is
// not included in this range.
return child_range(getStoredStmts(), getStoredStmts() + NumCaptures);
}
bool CapturedStmt::capturesVariable(const VarDecl *Var) const {
for (const auto &I : captures()) {
if (!I.capturesVariable())
continue;
// This does not handle variable redeclarations. This should be
// extended to capture variables with redeclarations, for example
// a thread-private variable in OpenMP.
if (I.getCapturedVar() == Var)
return true;
}
return false;
}
StmtRange OMPClause::children() {
switch (getClauseKind()) {
default: break;
#define OPENMP_CLAUSE(Name, Class) \
case OMPC_##Name: return static_cast<Class *>(this)->children();
#include "clang/Basic/OpenMPKinds.def"
}
llvm_unreachable("unknown OMPClause");
}
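// OpenMP clauses with variable lists tail-allocate their expression arrays
// immediately after the clause object, e.g. for OMPPrivateClause:
//
//   [OMPPrivateClause][Expr* x N variables][Expr* x N private copies]
//
// The setters below depend on this layout: each list is copied to the end of
// the previous one (varlist_end(), getPrivateCopies().end(), ...), and the
// Create methods round sizeof(Clause) up to alignOf<Expr *> so that the first
// trailing array is suitably aligned.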
void OMPPrivateClause::setPrivateCopies(ArrayRef<Expr *> VL) {
assert(VL.size() == varlist_size() &&
"Number of private copies is not the same as the preallocated buffer");
std::copy(VL.begin(), VL.end(), varlist_end());
}
OMPPrivateClause *
OMPPrivateClause::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL) {
// Allocate space for private variables and initializer expressions.
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPPrivateClause),
llvm::alignOf<Expr *>()) +
2 * sizeof(Expr *) * VL.size());
OMPPrivateClause *Clause =
new (Mem) OMPPrivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
Clause->setVarRefs(VL);
Clause->setPrivateCopies(PrivateVL);
return Clause;
}
OMPPrivateClause *OMPPrivateClause::CreateEmpty(const ASTContext &C,
unsigned N) {
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPPrivateClause),
llvm::alignOf<Expr *>()) +
2 * sizeof(Expr *) * N);
return new (Mem) OMPPrivateClause(N);
}
void OMPFirstprivateClause::setPrivateCopies(ArrayRef<Expr *> VL) {
assert(VL.size() == varlist_size() &&
"Number of private copies is not the same as the preallocated buffer");
std::copy(VL.begin(), VL.end(), varlist_end());
}
void OMPFirstprivateClause::setInits(ArrayRef<Expr *> VL) {
assert(VL.size() == varlist_size() &&
"Number of inits is not the same as the preallocated buffer");
std::copy(VL.begin(), VL.end(), getPrivateCopies().end());
}
OMPFirstprivateClause *
OMPFirstprivateClause::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL,
ArrayRef<Expr *> InitVL) {
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPFirstprivateClause),
llvm::alignOf<Expr *>()) +
3 * sizeof(Expr *) * VL.size());
OMPFirstprivateClause *Clause =
new (Mem) OMPFirstprivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
Clause->setVarRefs(VL);
Clause->setPrivateCopies(PrivateVL);
Clause->setInits(InitVL);
return Clause;
}
OMPFirstprivateClause *OMPFirstprivateClause::CreateEmpty(const ASTContext &C,
unsigned N) {
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPFirstprivateClause),
llvm::alignOf<Expr *>()) +
3 * sizeof(Expr *) * N);
return new (Mem) OMPFirstprivateClause(N);
}
void OMPLastprivateClause::setPrivateCopies(ArrayRef<Expr *> PrivateCopies) {
assert(PrivateCopies.size() == varlist_size() &&
"Number of private copies is not the same as the preallocated buffer");
std::copy(PrivateCopies.begin(), PrivateCopies.end(), varlist_end());
}
void OMPLastprivateClause::setSourceExprs(ArrayRef<Expr *> SrcExprs) {
assert(SrcExprs.size() == varlist_size() && "Number of source expressions is "
"not the same as the "
"preallocated buffer");
std::copy(SrcExprs.begin(), SrcExprs.end(), getPrivateCopies().end());
}
void OMPLastprivateClause::setDestinationExprs(ArrayRef<Expr *> DstExprs) {
assert(DstExprs.size() == varlist_size() && "Number of destination "
"expressions is not the same as "
"the preallocated buffer");
std::copy(DstExprs.begin(), DstExprs.end(), getSourceExprs().end());
}
void OMPLastprivateClause::setAssignmentOps(ArrayRef<Expr *> AssignmentOps) {
assert(AssignmentOps.size() == varlist_size() &&
"Number of assignment expressions is not the same as the preallocated "
"buffer");
std::copy(AssignmentOps.begin(), AssignmentOps.end(),
getDestinationExprs().end());
}
OMPLastprivateClause *OMPLastprivateClause::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps) {
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPLastprivateClause),
llvm::alignOf<Expr *>()) +
5 * sizeof(Expr *) * VL.size());
OMPLastprivateClause *Clause =
new (Mem) OMPLastprivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
Clause->setVarRefs(VL);
Clause->setSourceExprs(SrcExprs);
Clause->setDestinationExprs(DstExprs);
Clause->setAssignmentOps(AssignmentOps);
return Clause;
}
OMPLastprivateClause *OMPLastprivateClause::CreateEmpty(const ASTContext &C,
unsigned N) {
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPLastprivateClause),
llvm::alignOf<Expr *>()) +
5 * sizeof(Expr *) * N);
return new (Mem) OMPLastprivateClause(N);
}
OMPSharedClause *OMPSharedClause::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc,
ArrayRef<Expr *> VL) {
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPSharedClause),
llvm::alignOf<Expr *>()) +
sizeof(Expr *) * VL.size());
OMPSharedClause *Clause = new (Mem) OMPSharedClause(StartLoc, LParenLoc,
EndLoc, VL.size());
Clause->setVarRefs(VL);
return Clause;
}
OMPSharedClause *OMPSharedClause::CreateEmpty(const ASTContext &C,
unsigned N) {
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPSharedClause),
llvm::alignOf<Expr *>()) +
sizeof(Expr *) * N);
return new (Mem) OMPSharedClause(N);
}
void OMPLinearClause::setInits(ArrayRef<Expr *> IL) {
assert(IL.size() == varlist_size() &&
"Number of inits is not the same as the preallocated buffer");
std::copy(IL.begin(), IL.end(), varlist_end());
}
void OMPLinearClause::setUpdates(ArrayRef<Expr *> UL) {
assert(UL.size() == varlist_size() &&
"Number of updates is not the same as the preallocated buffer");
std::copy(UL.begin(), UL.end(), getInits().end());
}
void OMPLinearClause::setFinals(ArrayRef<Expr *> FL) {
assert(FL.size() == varlist_size() &&
"Number of final updates is not the same as the preallocated buffer");
std::copy(FL.begin(), FL.end(), getUpdates().end());
}
OMPLinearClause *
OMPLinearClause::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL,
ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep) {
// Allocate space for 4 lists (Vars, Inits, Updates, Finals) and 2 expressions
// (Step and CalcStep).
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPLinearClause),
llvm::alignOf<Expr *>()) +
(4 * VL.size() + 2) * sizeof(Expr *));
OMPLinearClause *Clause = new (Mem)
OMPLinearClause(StartLoc, LParenLoc, ColonLoc, EndLoc, VL.size());
Clause->setVarRefs(VL);
Clause->setInits(IL);
// Fill update and final expressions with zeroes, they are provided later,
// after the directive construction.
std::fill(Clause->getInits().end(), Clause->getInits().end() + VL.size(),
nullptr);
std::fill(Clause->getUpdates().end(), Clause->getUpdates().end() + VL.size(),
nullptr);
Clause->setStep(Step);
Clause->setCalcStep(CalcStep);
return Clause;
}
OMPLinearClause *OMPLinearClause::CreateEmpty(const ASTContext &C,
unsigned NumVars) {
// Allocate space for 4 lists (Vars, Inits, Updates, Finals) and 2 expressions
// (Step and CalcStep).
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPLinearClause),
llvm::alignOf<Expr *>()) +
(4 * NumVars + 2) * sizeof(Expr *));
return new (Mem) OMPLinearClause(NumVars);
}
OMPAlignedClause *
OMPAlignedClause::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, Expr *A) {
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPAlignedClause),
llvm::alignOf<Expr *>()) +
sizeof(Expr *) * (VL.size() + 1));
OMPAlignedClause *Clause = new (Mem)
OMPAlignedClause(StartLoc, LParenLoc, ColonLoc, EndLoc, VL.size());
Clause->setVarRefs(VL);
Clause->setAlignment(A);
return Clause;
}
OMPAlignedClause *OMPAlignedClause::CreateEmpty(const ASTContext &C,
unsigned NumVars) {
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPAlignedClause),
llvm::alignOf<Expr *>()) +
sizeof(Expr *) * (NumVars + 1));
return new (Mem) OMPAlignedClause(NumVars);
}
void OMPCopyinClause::setSourceExprs(ArrayRef<Expr *> SrcExprs) {
assert(SrcExprs.size() == varlist_size() && "Number of source expressions is "
"not the same as the "
"preallocated buffer");
std::copy(SrcExprs.begin(), SrcExprs.end(), varlist_end());
}
void OMPCopyinClause::setDestinationExprs(ArrayRef<Expr *> DstExprs) {
assert(DstExprs.size() == varlist_size() && "Number of destination "
"expressions is not the same as "
"the preallocated buffer");
std::copy(DstExprs.begin(), DstExprs.end(), getSourceExprs().end());
}
void OMPCopyinClause::setAssignmentOps(ArrayRef<Expr *> AssignmentOps) {
assert(AssignmentOps.size() == varlist_size() &&
"Number of assignment expressions is not the same as the preallocated "
"buffer");
std::copy(AssignmentOps.begin(), AssignmentOps.end(),
getDestinationExprs().end());
}
OMPCopyinClause *OMPCopyinClause::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps) {
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyinClause),
llvm::alignOf<Expr *>()) +
4 * sizeof(Expr *) * VL.size());
OMPCopyinClause *Clause = new (Mem) OMPCopyinClause(StartLoc, LParenLoc,
EndLoc, VL.size());
Clause->setVarRefs(VL);
Clause->setSourceExprs(SrcExprs);
Clause->setDestinationExprs(DstExprs);
Clause->setAssignmentOps(AssignmentOps);
return Clause;
}
OMPCopyinClause *OMPCopyinClause::CreateEmpty(const ASTContext &C,
unsigned N) {
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyinClause),
llvm::alignOf<Expr *>()) +
4 * sizeof(Expr *) * N);
return new (Mem) OMPCopyinClause(N);
}
void OMPCopyprivateClause::setSourceExprs(ArrayRef<Expr *> SrcExprs) {
assert(SrcExprs.size() == varlist_size() && "Number of source expressions is "
"not the same as the "
"preallocated buffer");
std::copy(SrcExprs.begin(), SrcExprs.end(), varlist_end());
}
void OMPCopyprivateClause::setDestinationExprs(ArrayRef<Expr *> DstExprs) {
assert(DstExprs.size() == varlist_size() && "Number of destination "
"expressions is not the same as "
"the preallocated buffer");
std::copy(DstExprs.begin(), DstExprs.end(), getSourceExprs().end());
}
void OMPCopyprivateClause::setAssignmentOps(ArrayRef<Expr *> AssignmentOps) {
assert(AssignmentOps.size() == varlist_size() &&
"Number of assignment expressions is not the same as the preallocated "
"buffer");
std::copy(AssignmentOps.begin(), AssignmentOps.end(),
getDestinationExprs().end());
}
OMPCopyprivateClause *OMPCopyprivateClause::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps) {
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyprivateClause),
llvm::alignOf<Expr *>()) +
4 * sizeof(Expr *) * VL.size());
OMPCopyprivateClause *Clause =
new (Mem) OMPCopyprivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
Clause->setVarRefs(VL);
Clause->setSourceExprs(SrcExprs);
Clause->setDestinationExprs(DstExprs);
Clause->setAssignmentOps(AssignmentOps);
return Clause;
}
OMPCopyprivateClause *OMPCopyprivateClause::CreateEmpty(const ASTContext &C,
unsigned N) {
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyprivateClause),
llvm::alignOf<Expr *>()) +
4 * sizeof(Expr *) * N);
return new (Mem) OMPCopyprivateClause(N);
}
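// Executable directives tail-allocate their clause pointers and child
// statements in the same way. Loop directives additionally store per-loop
// helper expressions (counters, inits, updates, finals), each array holding
// one entry per collapsed loop as given by getCollapsedNumber().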
void OMPExecutableDirective::setClauses(ArrayRef<OMPClause *> Clauses) {
assert(Clauses.size() == getNumClauses() &&
"Number of clauses is not the same as the preallocated buffer");
std::copy(Clauses.begin(), Clauses.end(), getClauses().begin());
}
void OMPLoopDirective::setCounters(ArrayRef<Expr *> A) {
assert(A.size() == getCollapsedNumber() &&
"Number of loop counters is not the same as the collapsed number");
std::copy(A.begin(), A.end(), getCounters().begin());
}
void OMPLoopDirective::setInits(ArrayRef<Expr *> A) {
assert(A.size() == getCollapsedNumber() &&
"Number of counter inits is not the same as the collapsed number");
std::copy(A.begin(), A.end(), getInits().begin());
}
void OMPLoopDirective::setUpdates(ArrayRef<Expr *> A) {
assert(A.size() == getCollapsedNumber() &&
"Number of counter updates is not the same as the collapsed number");
std::copy(A.begin(), A.end(), getUpdates().begin());
}
void OMPLoopDirective::setFinals(ArrayRef<Expr *> A) {
assert(A.size() == getCollapsedNumber() &&
"Number of counter finals is not the same as the collapsed number");
std::copy(A.begin(), A.end(), getFinals().begin());
}
void OMPReductionClause::setLHSExprs(ArrayRef<Expr *> LHSExprs) {
assert(
LHSExprs.size() == varlist_size() &&
"Number of LHS expressions is not the same as the preallocated buffer");
std::copy(LHSExprs.begin(), LHSExprs.end(), varlist_end());
}
void OMPReductionClause::setRHSExprs(ArrayRef<Expr *> RHSExprs) {
assert(
RHSExprs.size() == varlist_size() &&
"Number of RHS expressions is not the same as the preallocated buffer");
std::copy(RHSExprs.begin(), RHSExprs.end(), getLHSExprs().end());
}
void OMPReductionClause::setReductionOps(ArrayRef<Expr *> ReductionOps) {
assert(ReductionOps.size() == varlist_size() && "Number of reduction "
"expressions is not the same "
"as the preallocated buffer");
std::copy(ReductionOps.begin(), ReductionOps.end(), getRHSExprs().end());
}
OMPReductionClause *OMPReductionClause::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VL,
NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo,
ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
ArrayRef<Expr *> ReductionOps) {
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPReductionClause),
llvm::alignOf<Expr *>()) +
4 * sizeof(Expr *) * VL.size());
OMPReductionClause *Clause = new (Mem) OMPReductionClause(
StartLoc, LParenLoc, EndLoc, ColonLoc, VL.size(), QualifierLoc, NameInfo);
Clause->setVarRefs(VL);
Clause->setLHSExprs(LHSExprs);
Clause->setRHSExprs(RHSExprs);
Clause->setReductionOps(ReductionOps);
return Clause;
}
OMPReductionClause *OMPReductionClause::CreateEmpty(const ASTContext &C,
unsigned N) {
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPReductionClause),
llvm::alignOf<Expr *>()) +
4 * sizeof(Expr *) * N);
return new (Mem) OMPReductionClause(N);
}
OMPFlushClause *OMPFlushClause::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc,
ArrayRef<Expr *> VL) {
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPFlushClause),
llvm::alignOf<Expr *>()) +
sizeof(Expr *) * VL.size());
OMPFlushClause *Clause =
new (Mem) OMPFlushClause(StartLoc, LParenLoc, EndLoc, VL.size());
Clause->setVarRefs(VL);
return Clause;
}
OMPFlushClause *OMPFlushClause::CreateEmpty(const ASTContext &C, unsigned N) {
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPFlushClause),
llvm::alignOf<Expr *>()) +
sizeof(Expr *) * N);
return new (Mem) OMPFlushClause(N);
}
OMPDependClause *
OMPDependClause::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VL) {
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPDependClause),
llvm::alignOf<Expr *>()) +
sizeof(Expr *) * VL.size());
OMPDependClause *Clause =
new (Mem) OMPDependClause(StartLoc, LParenLoc, EndLoc, VL.size());
Clause->setVarRefs(VL);
Clause->setDependencyKind(DepKind);
Clause->setDependencyLoc(DepLoc);
Clause->setColonLoc(ColonLoc);
return Clause;
}
OMPDependClause *OMPDependClause::CreateEmpty(const ASTContext &C, unsigned N) {
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPDependClause),
llvm::alignOf<Expr *>()) +
sizeof(Expr *) * N);
return new (Mem) OMPDependClause(N);
}
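// Returns the unique clause of kind K, or null if there is none. Only valid
// for clause kinds that may appear at most once on a directive; the assert
// below enforces this.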
const OMPClause *
OMPExecutableDirective::getSingleClause(OpenMPClauseKind K) const {
auto &&I = getClausesOfKind(K);
if (I) {
auto *Clause = *I;
assert(!++I && "There are at least 2 clauses of the specified kind");
return Clause;
}
return nullptr;
}
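// Each directive kind provides a Create method, used by Sema when building
// the AST, and a CreateEmpty method, used by AST deserialization to allocate
// a placeholder that is filled in afterwards; both must compute an identical
// allocation size. For loop directives, numLoopChildren(CollapsedNum, Kind)
// gives the number of child Stmt* slots required for that directive kind.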
OMPParallelDirective *OMPParallelDirective::Create(
const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelDirective),
llvm::alignOf<OMPClause *>());
void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
sizeof(Stmt *));
OMPParallelDirective *Dir = new (Mem) OMPParallelDirective(StartLoc, EndLoc,
Clauses.size());
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
return Dir;
}
OMPParallelDirective *OMPParallelDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelDirective),
llvm::alignOf<OMPClause *>());
void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
sizeof(Stmt *));
return new (Mem) OMPParallelDirective(NumClauses);
}
OMPSimdDirective *
OMPSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSimdDirective),
llvm::alignOf<OMPClause *>());
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_simd));
OMPSimdDirective *Dir = new (Mem)
OMPSimdDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
Dir->setPreCond(Exprs.PreCond);
Dir->setCond(Exprs.Cond);
Dir->setInit(Exprs.Init);
Dir->setInc(Exprs.Inc);
Dir->setCounters(Exprs.Counters);
Dir->setInits(Exprs.Inits);
Dir->setUpdates(Exprs.Updates);
Dir->setFinals(Exprs.Finals);
return Dir;
}
OMPSimdDirective *OMPSimdDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSimdDirective),
llvm::alignOf<OMPClause *>());
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_simd));
return new (Mem) OMPSimdDirective(CollapsedNum, NumClauses);
}
OMPForDirective *
OMPForDirective::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPForDirective),
llvm::alignOf<OMPClause *>());
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for));
OMPForDirective *Dir =
new (Mem) OMPForDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
Dir->setPreCond(Exprs.PreCond);
Dir->setCond(Exprs.Cond);
Dir->setInit(Exprs.Init);
Dir->setInc(Exprs.Inc);
Dir->setIsLastIterVariable(Exprs.IL);
Dir->setLowerBoundVariable(Exprs.LB);
Dir->setUpperBoundVariable(Exprs.UB);
Dir->setStrideVariable(Exprs.ST);
Dir->setEnsureUpperBound(Exprs.EUB);
Dir->setNextLowerBound(Exprs.NLB);
Dir->setNextUpperBound(Exprs.NUB);
Dir->setCounters(Exprs.Counters);
Dir->setInits(Exprs.Inits);
Dir->setUpdates(Exprs.Updates);
Dir->setFinals(Exprs.Finals);
return Dir;
}
OMPForDirective *OMPForDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPForDirective),
llvm::alignOf<OMPClause *>());
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for));
return new (Mem) OMPForDirective(CollapsedNum, NumClauses);
}
OMPForSimdDirective *
OMPForSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPForSimdDirective),
llvm::alignOf<OMPClause *>());
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for_simd));
OMPForSimdDirective *Dir = new (Mem)
OMPForSimdDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
Dir->setPreCond(Exprs.PreCond);
Dir->setCond(Exprs.Cond);
Dir->setInit(Exprs.Init);
Dir->setInc(Exprs.Inc);
Dir->setIsLastIterVariable(Exprs.IL);
Dir->setLowerBoundVariable(Exprs.LB);
Dir->setUpperBoundVariable(Exprs.UB);
Dir->setStrideVariable(Exprs.ST);
Dir->setEnsureUpperBound(Exprs.EUB);
Dir->setNextLowerBound(Exprs.NLB);
Dir->setNextUpperBound(Exprs.NUB);
Dir->setCounters(Exprs.Counters);
Dir->setInits(Exprs.Inits);
Dir->setUpdates(Exprs.Updates);
Dir->setFinals(Exprs.Finals);
return Dir;
}
OMPForSimdDirective *OMPForSimdDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPForSimdDirective),
llvm::alignOf<OMPClause *>());
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for_simd));
return new (Mem) OMPForSimdDirective(CollapsedNum, NumClauses);
}
OMPSectionsDirective *OMPSectionsDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSectionsDirective),
llvm::alignOf<OMPClause *>());
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
OMPSectionsDirective *Dir =
new (Mem) OMPSectionsDirective(StartLoc, EndLoc, Clauses.size());
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
return Dir;
}
OMPSectionsDirective *OMPSectionsDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSectionsDirective),
llvm::alignOf<OMPClause *>());
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
return new (Mem) OMPSectionsDirective(NumClauses);
}
OMPSectionDirective *OMPSectionDirective::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSectionDirective),
llvm::alignOf<Stmt *>());
void *Mem = C.Allocate(Size + sizeof(Stmt *));
OMPSectionDirective *Dir = new (Mem) OMPSectionDirective(StartLoc, EndLoc);
Dir->setAssociatedStmt(AssociatedStmt);
return Dir;
}
OMPSectionDirective *OMPSectionDirective::CreateEmpty(const ASTContext &C,
EmptyShell) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSectionDirective),
llvm::alignOf<Stmt *>());
void *Mem = C.Allocate(Size + sizeof(Stmt *));
return new (Mem) OMPSectionDirective();
}
OMPSingleDirective *OMPSingleDirective::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSingleDirective),
llvm::alignOf<OMPClause *>());
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
OMPSingleDirective *Dir =
new (Mem) OMPSingleDirective(StartLoc, EndLoc, Clauses.size());
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
return Dir;
}
OMPSingleDirective *OMPSingleDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSingleDirective),
llvm::alignOf<OMPClause *>());
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
return new (Mem) OMPSingleDirective(NumClauses);
}
OMPMasterDirective *OMPMasterDirective::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPMasterDirective),
llvm::alignOf<Stmt *>());
void *Mem = C.Allocate(Size + sizeof(Stmt *));
OMPMasterDirective *Dir = new (Mem) OMPMasterDirective(StartLoc, EndLoc);
Dir->setAssociatedStmt(AssociatedStmt);
return Dir;
}
OMPMasterDirective *OMPMasterDirective::CreateEmpty(const ASTContext &C,
EmptyShell) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPMasterDirective),
llvm::alignOf<Stmt *>());
void *Mem = C.Allocate(Size + sizeof(Stmt *));
return new (Mem) OMPMasterDirective();
}
OMPCriticalDirective *OMPCriticalDirective::Create(
const ASTContext &C, const DeclarationNameInfo &Name,
SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPCriticalDirective),
llvm::alignOf<Stmt *>());
void *Mem = C.Allocate(Size + sizeof(Stmt *));
OMPCriticalDirective *Dir =
new (Mem) OMPCriticalDirective(Name, StartLoc, EndLoc);
Dir->setAssociatedStmt(AssociatedStmt);
return Dir;
}
OMPCriticalDirective *OMPCriticalDirective::CreateEmpty(const ASTContext &C,
EmptyShell) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPCriticalDirective),
llvm::alignOf<Stmt *>());
void *Mem = C.Allocate(Size + sizeof(Stmt *));
return new (Mem) OMPCriticalDirective();
}
OMPParallelForDirective *OMPParallelForDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelForDirective),
llvm::alignOf<OMPClause *>());
void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
sizeof(Stmt *) *
numLoopChildren(CollapsedNum, OMPD_parallel_for));
OMPParallelForDirective *Dir = new (Mem)
OMPParallelForDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
Dir->setPreCond(Exprs.PreCond);
Dir->setCond(Exprs.Cond);
Dir->setInit(Exprs.Init);
Dir->setInc(Exprs.Inc);
Dir->setIsLastIterVariable(Exprs.IL);
Dir->setLowerBoundVariable(Exprs.LB);
Dir->setUpperBoundVariable(Exprs.UB);
Dir->setStrideVariable(Exprs.ST);
Dir->setEnsureUpperBound(Exprs.EUB);
Dir->setNextLowerBound(Exprs.NLB);
Dir->setNextUpperBound(Exprs.NUB);
Dir->setCounters(Exprs.Counters);
Dir->setInits(Exprs.Inits);
Dir->setUpdates(Exprs.Updates);
Dir->setFinals(Exprs.Finals);
return Dir;
}
OMPParallelForDirective *
OMPParallelForDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
unsigned CollapsedNum, EmptyShell) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelForDirective),
llvm::alignOf<OMPClause *>());
void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
sizeof(Stmt *) *
numLoopChildren(CollapsedNum, OMPD_parallel_for));
return new (Mem) OMPParallelForDirective(CollapsedNum, NumClauses);
}
OMPParallelForSimdDirective *OMPParallelForSimdDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelForSimdDirective),
llvm::alignOf<OMPClause *>());
void *Mem = C.Allocate(
Size + sizeof(OMPClause *) * Clauses.size() +
sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_parallel_for_simd));
OMPParallelForSimdDirective *Dir = new (Mem) OMPParallelForSimdDirective(
StartLoc, EndLoc, CollapsedNum, Clauses.size());
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
Dir->setIterationVariable(Exprs.IterationVarRef);
Dir->setLastIteration(Exprs.LastIteration);
Dir->setCalcLastIteration(Exprs.CalcLastIteration);
Dir->setPreCond(Exprs.PreCond);
Dir->setCond(Exprs.Cond);
Dir->setInit(Exprs.Init);
Dir->setInc(Exprs.Inc);
Dir->setIsLastIterVariable(Exprs.IL);
Dir->setLowerBoundVariable(Exprs.LB);
Dir->setUpperBoundVariable(Exprs.UB);
Dir->setStrideVariable(Exprs.ST);
Dir->setEnsureUpperBound(Exprs.EUB);
Dir->setNextLowerBound(Exprs.NLB);
Dir->setNextUpperBound(Exprs.NUB);
Dir->setCounters(Exprs.Counters);
Dir->setInits(Exprs.Inits);
Dir->setUpdates(Exprs.Updates);
Dir->setFinals(Exprs.Finals);
return Dir;
}
OMPParallelForSimdDirective *
OMPParallelForSimdDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum, EmptyShell) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelForSimdDirective),
llvm::alignOf<OMPClause *>());
void *Mem = C.Allocate(
Size + sizeof(OMPClause *) * NumClauses +
sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_parallel_for_simd));
return new (Mem) OMPParallelForSimdDirective(CollapsedNum, NumClauses);
}
OMPParallelSectionsDirective *OMPParallelSectionsDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelSectionsDirective),
llvm::alignOf<OMPClause *>());
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
OMPParallelSectionsDirective *Dir =
new (Mem) OMPParallelSectionsDirective(StartLoc, EndLoc, Clauses.size());
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
return Dir;
}
OMPParallelSectionsDirective *
OMPParallelSectionsDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelSectionsDirective),
llvm::alignOf<OMPClause *>());
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
return new (Mem) OMPParallelSectionsDirective(NumClauses);
}
OMPTaskDirective *OMPTaskDirective::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTaskDirective),
llvm::alignOf<OMPClause *>());
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
OMPTaskDirective *Dir =
new (Mem) OMPTaskDirective(StartLoc, EndLoc, Clauses.size());
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
return Dir;
}
OMPTaskDirective *OMPTaskDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTaskDirective),
llvm::alignOf<OMPClause *>());
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
return new (Mem) OMPTaskDirective(NumClauses);
}
OMPTaskyieldDirective *OMPTaskyieldDirective::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc) {
void *Mem = C.Allocate(sizeof(OMPTaskyieldDirective));
OMPTaskyieldDirective *Dir =
new (Mem) OMPTaskyieldDirective(StartLoc, EndLoc);
return Dir;
}
OMPTaskyieldDirective *OMPTaskyieldDirective::CreateEmpty(const ASTContext &C,
EmptyShell) {
void *Mem = C.Allocate(sizeof(OMPTaskyieldDirective));
return new (Mem) OMPTaskyieldDirective();
}
OMPBarrierDirective *OMPBarrierDirective::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc) {
void *Mem = C.Allocate(sizeof(OMPBarrierDirective));
OMPBarrierDirective *Dir = new (Mem) OMPBarrierDirective(StartLoc, EndLoc);
return Dir;
}
OMPBarrierDirective *OMPBarrierDirective::CreateEmpty(const ASTContext &C,
EmptyShell) {
void *Mem = C.Allocate(sizeof(OMPBarrierDirective));
return new (Mem) OMPBarrierDirective();
}
OMPTaskwaitDirective *OMPTaskwaitDirective::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc) {
void *Mem = C.Allocate(sizeof(OMPTaskwaitDirective));
OMPTaskwaitDirective *Dir = new (Mem) OMPTaskwaitDirective(StartLoc, EndLoc);
return Dir;
}
OMPTaskwaitDirective *OMPTaskwaitDirective::CreateEmpty(const ASTContext &C,
EmptyShell) {
void *Mem = C.Allocate(sizeof(OMPTaskwaitDirective));
return new (Mem) OMPTaskwaitDirective();
}
OMPTaskgroupDirective *OMPTaskgroupDirective::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTaskgroupDirective),
llvm::alignOf<Stmt *>());
void *Mem = C.Allocate(Size + sizeof(Stmt *));
OMPTaskgroupDirective *Dir =
new (Mem) OMPTaskgroupDirective(StartLoc, EndLoc);
Dir->setAssociatedStmt(AssociatedStmt);
return Dir;
}
OMPTaskgroupDirective *OMPTaskgroupDirective::CreateEmpty(const ASTContext &C,
EmptyShell) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTaskgroupDirective),
llvm::alignOf<Stmt *>());
void *Mem = C.Allocate(Size + sizeof(Stmt *));
return new (Mem) OMPTaskgroupDirective();
}
OMPCancellationPointDirective *OMPCancellationPointDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion) {
unsigned Size = llvm::RoundUpToAlignment(
sizeof(OMPCancellationPointDirective), llvm::alignOf<Stmt *>());
void *Mem = C.Allocate(Size);
OMPCancellationPointDirective *Dir =
new (Mem) OMPCancellationPointDirective(StartLoc, EndLoc);
Dir->setCancelRegion(CancelRegion);
return Dir;
}
OMPCancellationPointDirective *
OMPCancellationPointDirective::CreateEmpty(const ASTContext &C, EmptyShell) {
unsigned Size = llvm::RoundUpToAlignment(
sizeof(OMPCancellationPointDirective), llvm::alignOf<Stmt *>());
void *Mem = C.Allocate(Size);
return new (Mem) OMPCancellationPointDirective();
}
OMPCancelDirective *
OMPCancelDirective::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPCancelDirective),
llvm::alignOf<Stmt *>());
void *Mem = C.Allocate(Size);
OMPCancelDirective *Dir = new (Mem) OMPCancelDirective(StartLoc, EndLoc);
Dir->setCancelRegion(CancelRegion);
return Dir;
}
OMPCancelDirective *OMPCancelDirective::CreateEmpty(const ASTContext &C,
EmptyShell) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPCancelDirective),
llvm::alignOf<Stmt *>());
void *Mem = C.Allocate(Size);
return new (Mem) OMPCancelDirective();
}
OMPFlushDirective *OMPFlushDirective::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPFlushDirective),
llvm::alignOf<OMPClause *>());
void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size());
OMPFlushDirective *Dir =
new (Mem) OMPFlushDirective(StartLoc, EndLoc, Clauses.size());
Dir->setClauses(Clauses);
return Dir;
}
OMPFlushDirective *OMPFlushDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPFlushDirective),
llvm::alignOf<OMPClause *>());
void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses);
return new (Mem) OMPFlushDirective(NumClauses);
}
OMPOrderedDirective *OMPOrderedDirective::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPOrderedDirective),
llvm::alignOf<Stmt *>());
void *Mem = C.Allocate(Size + sizeof(Stmt *));
OMPOrderedDirective *Dir = new (Mem) OMPOrderedDirective(StartLoc, EndLoc);
Dir->setAssociatedStmt(AssociatedStmt);
return Dir;
}
OMPOrderedDirective *OMPOrderedDirective::CreateEmpty(const ASTContext &C,
EmptyShell) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPOrderedDirective),
llvm::alignOf<Stmt *>());
void *Mem = C.Allocate(Size + sizeof(Stmt *));
return new (Mem) OMPOrderedDirective();
}
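// An atomic directive stores five child statements after its clauses: the
// associated statement plus the 'x', 'v', 'expr' and 'update expr'
// sub-expressions, hence the 5 * sizeof(Stmt *) in the allocations below.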
OMPAtomicDirective *OMPAtomicDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPAtomicDirective),
llvm::alignOf<OMPClause *>());
void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
5 * sizeof(Stmt *));
OMPAtomicDirective *Dir =
new (Mem) OMPAtomicDirective(StartLoc, EndLoc, Clauses.size());
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
Dir->setX(X);
Dir->setV(V);
Dir->setExpr(E);
Dir->setUpdateExpr(UE);
Dir->IsXLHSInRHSPart = IsXLHSInRHSPart;
Dir->IsPostfixUpdate = IsPostfixUpdate;
return Dir;
}
OMPAtomicDirective *OMPAtomicDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPAtomicDirective),
llvm::alignOf<OMPClause *>());
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * NumClauses + 5 * sizeof(Stmt *));
return new (Mem) OMPAtomicDirective(NumClauses);
}
OMPTargetDirective *OMPTargetDirective::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTargetDirective),
llvm::alignOf<OMPClause *>());
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
OMPTargetDirective *Dir =
new (Mem) OMPTargetDirective(StartLoc, EndLoc, Clauses.size());
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
return Dir;
}
OMPTargetDirective *OMPTargetDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTargetDirective),
llvm::alignOf<OMPClause *>());
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
return new (Mem) OMPTargetDirective(NumClauses);
}
OMPTeamsDirective *OMPTeamsDirective::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTeamsDirective),
llvm::alignOf<OMPClause *>());
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
OMPTeamsDirective *Dir =
new (Mem) OMPTeamsDirective(StartLoc, EndLoc, Clauses.size());
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
return Dir;
}
OMPTeamsDirective *OMPTeamsDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPTeamsDirective),
llvm::alignOf<OMPClause *>());
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
return new (Mem) OMPTeamsDirective(NumClauses);
}